Preview of MAPREDUCE-279 merged to trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/MR-279-merge@1158962 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/mapreduce/.eclipse.templates/.launches/AllMapredTests.launch b/hadoop-mapreduce/.eclipse.templates/.launches/AllMapredTests.launch
similarity index 100%
rename from mapreduce/.eclipse.templates/.launches/AllMapredTests.launch
rename to hadoop-mapreduce/.eclipse.templates/.launches/AllMapredTests.launch
diff --git a/mapreduce/.eclipse.templates/.launches/JobTracker.launch b/hadoop-mapreduce/.eclipse.templates/.launches/JobTracker.launch
similarity index 100%
rename from mapreduce/.eclipse.templates/.launches/JobTracker.launch
rename to hadoop-mapreduce/.eclipse.templates/.launches/JobTracker.launch
diff --git a/mapreduce/.eclipse.templates/.launches/SpecificTestTemplate.launch b/hadoop-mapreduce/.eclipse.templates/.launches/SpecificTestTemplate.launch
similarity index 100%
rename from mapreduce/.eclipse.templates/.launches/SpecificTestTemplate.launch
rename to hadoop-mapreduce/.eclipse.templates/.launches/SpecificTestTemplate.launch
diff --git a/mapreduce/.eclipse.templates/.launches/TaskTracker.launch b/hadoop-mapreduce/.eclipse.templates/.launches/TaskTracker.launch
similarity index 100%
rename from mapreduce/.eclipse.templates/.launches/TaskTracker.launch
rename to hadoop-mapreduce/.eclipse.templates/.launches/TaskTracker.launch
diff --git a/mapreduce/.eclipse.templates/README.txt b/hadoop-mapreduce/.eclipse.templates/README.txt
similarity index 100%
rename from mapreduce/.eclipse.templates/README.txt
rename to hadoop-mapreduce/.eclipse.templates/README.txt
diff --git a/mapreduce/.gitignore b/hadoop-mapreduce/.gitignore
similarity index 100%
rename from mapreduce/.gitignore
rename to hadoop-mapreduce/.gitignore
diff --git a/mapreduce/CHANGES.txt b/hadoop-mapreduce/CHANGES.txt
similarity index 100%
rename from mapreduce/CHANGES.txt
rename to hadoop-mapreduce/CHANGES.txt
diff --git a/hadoop-mapreduce/INSTALL b/hadoop-mapreduce/INSTALL
new file mode 100644
index 0000000..4592914
--- /dev/null
+++ b/hadoop-mapreduce/INSTALL
@@ -0,0 +1,89 @@
+To compile Hadoop MapReduce next, do the following:
+
+Step 1) Install dependencies for yarn
+
+See http://svn.apache.org/repos/asf/hadoop/common/branches/MR-279/mapreduce/yarn/README
+Make sure the protobuf library is in your library path, or set: export LD_LIBRARY_PATH=/usr/local/lib
+
+Step 2) Checkout
+
+svn checkout http://svn.apache.org/repos/asf/hadoop/common/branches/MR-279/
+
+Step 3) Build common
+
+Go to the common directory and run:
+ant veryclean mvn-install
+
+Step 4) Build HDFS
+
+Go to the hdfs directory and run:
+ant veryclean mvn-install -Dresolvers=internal
+
+Step 5) Build yarn and mapreduce
+
+Go to the mapreduce directory:
+export MAVEN_OPTS=-Xmx512m
+
+mvn clean install assembly:assembly
+ant veryclean jar jar-test -Dresolvers=internal
+
+To skip the tests, run:
+
+mvn clean install assembly:assembly -DskipTests
+ant veryclean jar jar-test -Dresolvers=internal
+
+The build produces a tarball:
+ls target/hadoop-mapreduce-1.0-SNAPSHOT-bin.tar.gz
+
+Step 6) Untar the tarball in a clean, separate directory,
+say HADOOP_YARN_INSTALL.
+
+To run Hadoop MapReduce next applications:
+
+Step 7) cd $HADOOP_YARN_INSTALL
+
+Step 8) export the following variables:
+
+HADOOP_MAPRED_HOME=
+HADOOP_COMMON_HOME=
+HADOOP_HDFS_HOME=
+YARN_HOME=directory where you untarred yarn
+HADOOP_CONF_DIR=
+YARN_CONF_DIR=$HADOOP_CONF_DIR
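+
+For example (a minimal sketch; the paths below are illustrative, point
+each variable at your actual install or checkout):
+
+export HADOOP_MAPRED_HOME=/path/to/mapreduce       # the mapreduce checkout
+export HADOOP_COMMON_HOME=/path/to/common/install
+export HADOOP_HDFS_HOME=/path/to/hdfs/install
+export YARN_HOME=$HADOOP_YARN_INSTALL
+export HADOOP_CONF_DIR=$YARN_HOME/conf
+export YARN_CONF_DIR=$HADOOP_CONF_DIR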
+
+Step 9) bin/yarn-daemon.sh start resourcemanager
+
+Step 10) bin/yarn-daemon.sh start nodemanager
+
+Step 11) bin/yarn-daemon.sh start historyserver
+
+Step 12) Create the following symlinks in hadoop-common/lib
+
+ln -s $HADOOP_YARN_INSTALL/modules/hadoop-mapreduce-client-app-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/yarn-api-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/hadoop-mapreduce-client-common-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/yarn-common-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/hadoop-mapreduce-client-core-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/yarn-server-common-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/hadoop-mapreduce-client-jobclient-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/lib/protobuf-java-2.4.0a.jar .
+
+Step 13) The YARN daemons are now up. But to run mapreduce applications, which now live in user land, you must set up the nodemanager with the following configuration in your yarn-site.xml before starting it.
+ <property>
+ <name>nodemanager.auxiluary.services</name>
+ <value>mapreduce.shuffle</value>
+ </property>
+
+ <property>
+ <name>nodemanager.aux.service.mapreduce.shuffle.class</name>
+ <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+ </property>
+
+Step 14) You are all set. An example of how to run a MapReduce job:
+
+cd $HADOOP_MAPRED_HOME
+ant examples -Dresolvers=internal
+$HADOOP_COMMON_HOME/bin/hadoop jar $HADOOP_MAPRED_HOME/build/hadoop-mapred-examples-0.22.0-SNAPSHOT.jar randomwriter -Dmapreduce.job.user.name=$USER -Dmapreduce.clientfactory.class.name=org.apache.hadoop.mapred.YarnClientFactory -Dmapreduce.randomwriter.bytespermap=10000 -Ddfs.blocksize=536870912 -Ddfs.block.size=536870912 -libjars $HADOOP_YARN_INSTALL/hadoop-mapreduce-1.0-SNAPSHOT/modules/hadoop-mapreduce-client-jobclient-1.0-SNAPSHOT.jar output
+
+The command-line output should be similar to what you see in the JT/TT setup (Hadoop 0.20/0.21).
+
diff --git a/mapreduce/LICENSE.txt b/hadoop-mapreduce/LICENSE.txt
similarity index 100%
rename from mapreduce/LICENSE.txt
rename to hadoop-mapreduce/LICENSE.txt
diff --git a/mapreduce/NOTICE.txt b/hadoop-mapreduce/NOTICE.txt
similarity index 100%
rename from mapreduce/NOTICE.txt
rename to hadoop-mapreduce/NOTICE.txt
diff --git a/hadoop-mapreduce/assembly/all.xml b/hadoop-mapreduce/assembly/all.xml
new file mode 100644
index 0000000..32c9a79
--- /dev/null
+++ b/hadoop-mapreduce/assembly/all.xml
@@ -0,0 +1,101 @@
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+ <id>all</id>
+ <formats>
+ <format>tar.gz</format>
+ </formats>
+ <includeBaseDirectory>true</includeBaseDirectory>
+ <!-- TODO: this layout is wrong. We need module specific bin files in module specific dirs -->
+ <fileSets>
+ <fileSet>
+ <directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/target/classes/bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <includes>
+ <include>container-executor</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ <fileSet>
+ <directory>hadoop-yarn/bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <includes>
+ <include>*</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ <fileSet>
+ <directory>bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <includes>
+ <include>*</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ <fileSet>
+ <directory>hadoop-yarn/conf</directory>
+ <outputDirectory>conf</outputDirectory>
+ <includes>
+ <include>**/*</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <outputDirectory>sources</outputDirectory>
+ <excludes>
+ <exclude>**/*.jar</exclude>
+ <exclude>**/target/**</exclude>
+ <!-- scripts to include later for setting fileMode -->
+ <exclude>**/bin/*</exclude>
+ <exclude>**/scripts/*</exclude>
+ <!-- images that we don't need (and cause problems for our tools) -->
+ <exclude>**/dt-*/images/**</exclude>
+ <!-- until the code that does this is fixed -->
+ <exclude>**/file:/**</exclude>
+ <exclude>**/SecurityAuth.audit*</exclude>
+ </excludes>
+ <includes>
+ <include>assembly/**</include>
+ <include>pom.xml</include>
+ <include>build*.xml</include>
+ <include>ivy.xml</include>
+ <include>ivy/**</include>
+ <include>INSTALL</include>
+ <include>LICENSE.txt</include>
+ <include>mr-client/**</include>
+ <include>hadoop-yarn/**</include>
+ <include>src/**</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <outputDirectory>sources</outputDirectory>
+ <includes>
+ <include>**/bin/*</include>
+ <include>**/scripts/*</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ </fileSets>
+ <moduleSets>
+ <moduleSet>
+ <excludes>
+ <exclude>org.apache.hadoop:hadoop-yarn-server-tests</exclude>
+ </excludes>
+ <binaries>
+ <outputDirectory>modules</outputDirectory>
+ <includeDependencies>false</includeDependencies>
+ <unpack>false</unpack>
+ </binaries>
+ </moduleSet>
+ </moduleSets>
+ <dependencySets>
+ <dependencySet>
+ <useProjectArtifact>false</useProjectArtifact>
+ <outputDirectory>/lib</outputDirectory>
+ <!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
+ <excludes>
+ <exclude>org.apache.hadoop:hadoop-common</exclude>
+ <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+ </excludes>
+ </dependencySet>
+ </dependencySets>
+</assembly>
diff --git a/mapreduce/bin/mapred b/hadoop-mapreduce/bin/mapred
similarity index 100%
rename from mapreduce/bin/mapred
rename to hadoop-mapreduce/bin/mapred
diff --git a/mapreduce/bin/mapred-config.sh b/hadoop-mapreduce/bin/mapred-config.sh
similarity index 100%
rename from mapreduce/bin/mapred-config.sh
rename to hadoop-mapreduce/bin/mapred-config.sh
diff --git a/mapreduce/bin/start-mapred.sh b/hadoop-mapreduce/bin/start-mapred.sh
similarity index 100%
rename from mapreduce/bin/start-mapred.sh
rename to hadoop-mapreduce/bin/start-mapred.sh
diff --git a/mapreduce/bin/stop-mapred.sh b/hadoop-mapreduce/bin/stop-mapred.sh
similarity index 100%
rename from mapreduce/bin/stop-mapred.sh
rename to hadoop-mapreduce/bin/stop-mapred.sh
diff --git a/mapreduce/build-utils.xml b/hadoop-mapreduce/build-utils.xml
similarity index 100%
rename from mapreduce/build-utils.xml
rename to hadoop-mapreduce/build-utils.xml
diff --git a/mapreduce/build.xml b/hadoop-mapreduce/build.xml
similarity index 100%
rename from mapreduce/build.xml
rename to hadoop-mapreduce/build.xml
diff --git a/mapreduce/conf/capacity-scheduler.xml.template b/hadoop-mapreduce/conf/capacity-scheduler.xml.template
similarity index 100%
rename from mapreduce/conf/capacity-scheduler.xml.template
rename to hadoop-mapreduce/conf/capacity-scheduler.xml.template
diff --git a/mapreduce/conf/configuration.xsl b/hadoop-mapreduce/conf/configuration.xsl
similarity index 100%
rename from mapreduce/conf/configuration.xsl
rename to hadoop-mapreduce/conf/configuration.xsl
diff --git a/mapreduce/conf/fair-scheduler.xml.template b/hadoop-mapreduce/conf/fair-scheduler.xml.template
similarity index 100%
rename from mapreduce/conf/fair-scheduler.xml.template
rename to hadoop-mapreduce/conf/fair-scheduler.xml.template
diff --git a/mapreduce/conf/mapred-queues.xml.template b/hadoop-mapreduce/conf/mapred-queues.xml.template
similarity index 100%
rename from mapreduce/conf/mapred-queues.xml.template
rename to hadoop-mapreduce/conf/mapred-queues.xml.template
diff --git a/mapreduce/conf/mapred-site.xml.template b/hadoop-mapreduce/conf/mapred-site.xml.template
similarity index 100%
rename from mapreduce/conf/mapred-site.xml.template
rename to hadoop-mapreduce/conf/mapred-site.xml.template
diff --git a/mapreduce/conf/taskcontroller.cfg b/hadoop-mapreduce/conf/taskcontroller.cfg
similarity index 100%
rename from mapreduce/conf/taskcontroller.cfg
rename to hadoop-mapreduce/conf/taskcontroller.cfg
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/pom.xml
new file mode 100644
index 0000000..550f186
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/pom.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-mapreduce-client</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${hadoop-mapreduce.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-app</artifactId>
+ <name>hadoop-mapreduce-client-app</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <applink.base>${project.build.directory}/${project.name}</applink.base>
+ <mr.basedir>${project.parent.parent.basedir}</mr.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-common</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-nodemanager</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <!-- local name for links -->
+ <finalName>mr-app</finalName>
+ <plugins>
+ <plugin>
+ <artifactId>maven-jar-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ <phase>test-compile</phase>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>build-classpath</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>build-classpath</goal>
+ </goals>
+ <configuration>
+ <outputFile>target/classes/mrapp-generated-classpath</outputFile>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-mr-app-symlinks</id>
+ <phase>package</phase>
+ <configuration>
+ <target>
+ <symlink link="${applink.base}.jar"
+ resource="mr-app.jar" failonerror="false"/>
+ <symlink link="${applink.base}-1.0-SNAPSHOT.jar"
+ resource="mr-app.jar" failonerror="false"/>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/CustomOutputCommitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/CustomOutputCommitter.java
new file mode 100644
index 0000000..468e9d1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/CustomOutputCommitter.java
@@ -0,0 +1,67 @@
+package org.apache.hadoop;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.FileOutputFormat;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContext;
+import org.apache.hadoop.mapred.OutputCommitter;
+import org.apache.hadoop.mapred.TaskAttemptContext;
+
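+/**
+ * An OutputCommitter that records each commit-protocol callback by creating
+ * an empty marker file (e.g. "_job_setup") in the job's output directory,
+ * making it easy to verify which setup/commit/abort hooks actually ran.
+ */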
+public class CustomOutputCommitter extends OutputCommitter {
+
+ public static final String JOB_SETUP_FILE_NAME = "_job_setup";
+ public static final String JOB_COMMIT_FILE_NAME = "_job_commit";
+ public static final String JOB_ABORT_FILE_NAME = "_job_abort";
+ public static final String TASK_SETUP_FILE_NAME = "_task_setup";
+ public static final String TASK_ABORT_FILE_NAME = "_task_abort";
+ public static final String TASK_COMMIT_FILE_NAME = "_task_commit";
+
+ @Override
+ public void setupJob(JobContext jobContext) throws IOException {
+ writeFile(jobContext.getJobConf(), JOB_SETUP_FILE_NAME);
+ }
+
+ @Override
+ public void commitJob(JobContext jobContext) throws IOException {
+ super.commitJob(jobContext);
+ writeFile(jobContext.getJobConf(), JOB_COMMIT_FILE_NAME);
+ }
+
+ @Override
+ public void abortJob(JobContext jobContext, int status)
+ throws IOException {
+ super.abortJob(jobContext, status);
+ writeFile(jobContext.getJobConf(), JOB_ABORT_FILE_NAME);
+ }
+
+ @Override
+ public void setupTask(TaskAttemptContext taskContext) throws IOException {
+ writeFile(taskContext.getJobConf(), TASK_SETUP_FILE_NAME);
+ }
+
+ @Override
+ public boolean needsTaskCommit(TaskAttemptContext taskContext)
+ throws IOException {
+ return true;
+ }
+
+ @Override
+ public void commitTask(TaskAttemptContext taskContext) throws IOException {
+ writeFile(taskContext.getJobConf(), TASK_COMMIT_FILE_NAME);
+ }
+
+ @Override
+ public void abortTask(TaskAttemptContext taskContext) throws IOException {
+ writeFile(taskContext.getJobConf(), TASK_ABORT_FILE_NAME);
+ }
+
+  private void writeFile(JobConf conf, String filename) throws IOException {
+ System.out.println("writing file ----" + filename);
+ Path outputPath = FileOutputFormat.getOutputPath(conf);
+ FileSystem fs = outputPath.getFileSystem(conf);
+ fs.create(new Path(outputPath, filename)).close();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
new file mode 100644
index 0000000..5f664d1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -0,0 +1,455 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.URI;
+import java.util.HashSet;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSError;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+/**
+ * Runs the container task locally in a thread.
+ * Since all (sub)tasks share the same local directory, they must be executed
+ * sequentially in order to avoid creating/deleting the same files/dirs.
+ */
+public class LocalContainerLauncher extends AbstractService implements
+ ContainerLauncher {
+
+ private static final File curDir = new File(".");
+ private static final Log LOG = LogFactory.getLog(LocalContainerLauncher.class);
+
+ private FileContext curFC = null;
+ private final HashSet<File> localizedFiles;
+ private final AppContext context;
+ private final TaskUmbilicalProtocol umbilical;
+ private Thread eventHandlingThread;
+ private BlockingQueue<ContainerLauncherEvent> eventQueue =
+ new LinkedBlockingQueue<ContainerLauncherEvent>();
+
+ public LocalContainerLauncher(AppContext context,
+ TaskUmbilicalProtocol umbilical) {
+ super(LocalContainerLauncher.class.getName());
+ this.context = context;
+ this.umbilical = umbilical;
+ // umbilical: MRAppMaster creates (taskAttemptListener), passes to us (TODO/FIXME: pointless to use RPC to talk to self; should create LocalTaskAttemptListener or similar: implement umbilical protocol but skip RPC stuff)
+
+ try {
+ curFC = FileContext.getFileContext(curDir.toURI());
+ } catch (UnsupportedFileSystemException ufse) {
+ LOG.error("Local filesystem " + curDir.toURI().toString()
+ + " is unsupported?? (should never happen)");
+ }
+
+ // Save list of files/dirs that are supposed to be present so can delete
+ // any extras created by one task before starting subsequent task. Note
+ // that there's no protection against deleted or renamed localization;
+ // users who do that get what they deserve (and will have to disable
+ // uberization in order to run correctly).
+ File[] curLocalFiles = curDir.listFiles();
+ localizedFiles = new HashSet<File>(curLocalFiles.length);
+ for (int j = 0; j < curLocalFiles.length; ++j) {
+ localizedFiles.add(curLocalFiles[j]);
+ }
+
+ // Relocalization note/future FIXME (per chrisdo, 20110315): At moment,
+ // full localization info is in AppSubmissionContext passed from client to
+ // RM and then to NM for AM-container launch: no difference between AM-
+ // localization and MapTask- or ReduceTask-localization, so can assume all
+ // OK. Longer-term, will need to override uber-AM container-localization
+ // request ("needed resources") with union of regular-AM-resources + task-
+ // resources (and, if maps and reduces ever differ, then union of all three
+ // types), OR will need localizer service/API that uber-AM can request
+ // after running (e.g., "localizeForTask()" or "localizeForMapTask()").
+ }
+
+ public void start() {
+ eventHandlingThread = new Thread(new SubtaskRunner(), "uber-SubtaskRunner");
+ eventHandlingThread.start();
+ super.start();
+ }
+
+ public void stop() {
+ eventHandlingThread.interrupt();
+ super.stop();
+ }
+
+ @Override
+ public void handle(ContainerLauncherEvent event) {
+ try {
+ eventQueue.put(event);
+ } catch (InterruptedException e) {
+ throw new YarnException(e); // FIXME? YarnException is "for runtime exceptions only"
+ }
+ }
+
+
+ /*
+ * Uber-AM lifecycle/ordering ("normal" case):
+ *
+ * - [somebody] sends TA_ASSIGNED
+ * - handled by ContainerAssignedTransition (TaskAttemptImpl.java)
+ * - creates "remoteTask" for us == real Task
+ * - sends CONTAINER_REMOTE_LAUNCH
+ * - TA: UNASSIGNED -> ASSIGNED
+ * - CONTAINER_REMOTE_LAUNCH handled by LocalContainerLauncher (us)
+ * - sucks "remoteTask" out of TaskAttemptImpl via getRemoteTask()
+ * - sends TA_CONTAINER_LAUNCHED
+ * [[ elsewhere...
+ * - TA_CONTAINER_LAUNCHED handled by LaunchedContainerTransition
+ * - registers "remoteTask" with TaskAttemptListener (== umbilical)
+ * - NUKES "remoteTask"
+ * - sends T_ATTEMPT_LAUNCHED (Task: SCHEDULED -> RUNNING)
+ * - TA: ASSIGNED -> RUNNING
+ * ]]
+ * - runs Task (runSubMap() or runSubReduce())
+ * - TA can safely send TA_UPDATE since in RUNNING state
+ * [modulo possible TA-state-machine race noted below: CHECK (TODO)]
+ */
+ private class SubtaskRunner implements Runnable {
+
+ private boolean doneWithMaps = false;
+ private int finishedSubMaps = 0;
+
+ SubtaskRunner() {
+ }
+
+ @Override
+ public void run() {
+ ContainerLauncherEvent event = null;
+
+ // _must_ either run subtasks sequentially or accept expense of new JVMs
+ // (i.e., fork()), else will get weird failures when maps try to create/
+ // write same dirname or filename: no chdir() in Java
+ while (!Thread.currentThread().isInterrupted()) {
+ try {
+ event = eventQueue.take();
+ } catch (InterruptedException e) { // mostly via T_KILL? JOB_KILL?
+ LOG.error("Returning, interrupted : " + e);
+ return;
+ }
+
+ LOG.info("Processing the event " + event.toString());
+
+ if (event.getType() == EventType.CONTAINER_REMOTE_LAUNCH) {
+
+ ContainerRemoteLaunchEvent launchEv =
+ (ContainerRemoteLaunchEvent)event;
+ TaskAttemptId attemptID = launchEv.getTaskAttemptID(); //FIXME: can attemptID ever be null? (only if retrieved over umbilical?)
+
+ Job job = context.getAllJobs().get(attemptID.getTaskId().getJobId());
+ int numMapTasks = job.getTotalMaps();
+ int numReduceTasks = job.getTotalReduces();
+
+ // YARN (tracking) Task:
+ org.apache.hadoop.mapreduce.v2.app.job.Task ytask =
+ job.getTask(attemptID.getTaskId());
+ // classic mapred Task:
+ org.apache.hadoop.mapred.Task remoteTask = launchEv.getRemoteTask();
+
+ // after "launching," send launched event to task attempt to move
+ // state from ASSIGNED to RUNNING (also nukes "remoteTask", so must
+ // do getRemoteTask() call first)
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED)); //FIXME: race condition here? or do we have same kind of lock on TA handler => MapTask can't send TA_UPDATE before TA_CONTAINER_LAUNCHED moves TA to RUNNING state? (probably latter)
+
+ if (numMapTasks == 0) {
+ doneWithMaps = true;
+ }
+
+ try {
+ if (remoteTask.isMapOrReduce()) {
+ JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
+ jce.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_UBERTASKS, 1);
+ if (remoteTask.isMapTask()) {
+ jce.addCounterUpdate(JobCounter.NUM_UBER_SUBMAPS, 1);
+ } else {
+ jce.addCounterUpdate(JobCounter.NUM_UBER_SUBREDUCES, 1);
+ }
+ context.getEventHandler().handle(jce);
+ }
+ runSubtask(remoteTask, ytask.getType(), attemptID, numMapTasks,
+ (numReduceTasks > 0));
+
+ } catch (RuntimeException re) {
+ JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
+ jce.addCounterUpdate(JobCounter.NUM_FAILED_UBERTASKS, 1);
+ context.getEventHandler().handle(jce);
+ // this is our signal that the subtask failed in some way, so
+ // simulate a failed JVM/container and send a container-completed
+ // event to task attempt (i.e., move state machine from RUNNING
+ // to FAIL_CONTAINER_CLEANUP [and ultimately to FAILED])
+ context.getEventHandler().handle(new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
+ } catch (IOException ioe) {
+ // if umbilical itself barfs (in error-handler of runSubMap()),
+ // we're pretty much hosed, so do what YarnChild main() does
+ // (i.e., exit clumsily--but can never happen, so no worries!)
+ LOG.fatal("oopsie... this can never happen: "
+ + StringUtils.stringifyException(ioe));
+ System.exit(-1);
+ }
+
+ } else if (event.getType() == EventType.CONTAINER_REMOTE_CLEANUP) {
+
+ // no container to kill, so just send "cleaned" event to task attempt
+ // to move us from SUCCESS_CONTAINER_CLEANUP to SUCCEEDED state
+ // (or {FAIL|KILL}_CONTAINER_CLEANUP to {FAIL|KILL}_TASK_CLEANUP)
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(event.getTaskAttemptID(),
+ TaskAttemptEventType.TA_CONTAINER_CLEANED));
+
+ } else {
+ LOG.warn("Ignoring unexpected event " + event.toString());
+ }
+
+ }
+ }
+
+ private void runSubtask(org.apache.hadoop.mapred.Task task,
+ final TaskType taskType,
+ TaskAttemptId attemptID,
+ final int numMapTasks,
+ boolean renameOutputs)
+ throws RuntimeException, IOException {
+ org.apache.hadoop.mapred.TaskAttemptID classicAttemptID =
+ TypeConverter.fromYarn(attemptID);
+
+ try {
+ JobConf conf = new JobConf(getConfig());
+
+ // mark this as an uberized subtask so it can set task counter
+ // (longer-term/FIXME: could redefine as job counter and send
+ // "JobCounterEvent" to JobImpl on [successful] completion of subtask;
+ // will need new Job state-machine transition and JobImpl jobCounters
+ // map to handle)
+ conf.setBoolean("mapreduce.task.uberized", true);
+
+ // META-FIXME: do we want the extra sanity-checking (doneWithMaps,
+ // etc.), or just assume/hope the state machine(s) and uber-AM work
+ // as expected?
+ if (taskType == TaskType.MAP) {
+ if (doneWithMaps) {
+ LOG.error("CONTAINER_REMOTE_LAUNCH contains a map task ("
+ + attemptID + "), but should be finished with maps");
+ // throw new RuntimeException() (FIXME: what's appropriate here?)
+ }
+
+ MapTask map = (MapTask)task;
+
+ //CODE-REVIEWER QUESTION: why not task.getConf() or map.getConf() instead of conf? do we need Task's localizeConfiguration() run on this first?
+ map.run(conf, umbilical);
+
+ if (renameOutputs) {
+ renameMapOutputForReduce(conf, attemptID, map.getMapOutputFile());
+ }
+ relocalize();
+
+ if (++finishedSubMaps == numMapTasks) {
+ doneWithMaps = true;
+ }
+
+ } else /* TaskType.REDUCE */ {
+
+ if (!doneWithMaps) {
+ //check if event-queue empty? whole idea of counting maps vs. checking event queue is a tad wacky...but could enforce ordering (assuming no "lost events") at LocalMRAppMaster [CURRENT BUG(?): doesn't send reduce event until maps all done]
+ LOG.error("CONTAINER_REMOTE_LAUNCH contains a reduce task ("
+ + attemptID + "), but not yet finished with maps");
+ // throw new RuntimeException() (FIXME) // or push reduce event back onto end of queue? (probably former)
+ }
+
+ ReduceTask reduce = (ReduceTask)task;
+
+ // a.k.a. "mapreduce.jobtracker.address" in LocalJobRunner:
+ conf.set(MRConfig.MASTER_ADDRESS, "local"); // bypass shuffle
+
+ reduce.run(conf, umbilical);
+ //relocalize(); // needed only if more than one reducer supported (is MAPREDUCE-434 fixed yet?)
+ }
+
+ } catch (FSError e) {
+ LOG.fatal("FSError from child", e);
+ // umbilical: MRAppMaster creates (taskAttemptListener), passes to us
+ umbilical.fsError(classicAttemptID, e.getMessage());
+ throw new RuntimeException();
+
+ } catch (Exception exception) {
+ LOG.warn("Exception running local (uberized) 'child' : "
+ + StringUtils.stringifyException(exception));
+ try {
+ if (task != null) {
+ // do cleanup for the task
+// if (childUGI == null) { // no need to jump into doAs block
+ task.taskCleanup(umbilical);
+// } else {
+// final Task taskFinal = task;
+// childUGI.doAs(new PrivilegedExceptionAction<Object>() {
+// @Override
+// public Object run() throws Exception {
+// taskFinal.taskCleanup(umbilical);
+// return null;
+// }
+// });
+// }
+ }
+ } catch (Exception e) {
+ LOG.info("Exception cleaning up: "
+ + StringUtils.stringifyException(e));
+ }
+ // Report back any failures, for diagnostic purposes
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ exception.printStackTrace(new PrintStream(baos));
+// if (classicAttemptID != null) {
+ umbilical.reportDiagnosticInfo(classicAttemptID, baos.toString());
+// }
+ throw new RuntimeException();
+
+ } catch (Throwable throwable) {
+ LOG.fatal("Error running local (uberized) 'child' : "
+ + StringUtils.stringifyException(throwable));
+// if (classicAttemptID != null) {
+ Throwable tCause = throwable.getCause();
+ String cause = (tCause == null)
+ ? throwable.getMessage()
+ : StringUtils.stringifyException(tCause);
+ umbilical.fatalError(classicAttemptID, cause);
+// }
+ throw new RuntimeException();
+
+ } finally {
+/*
+FIXME: do we need to do any of this stuff? (guessing not since not in own JVM)
+ RPC.stopProxy(umbilical);
+ DefaultMetricsSystem.shutdown();
+ // Shutting down log4j of the child-vm...
+ // This assumes that on return from Task.run()
+ // there is no more logging done.
+ LogManager.shutdown();
+ */
+ }
+ }
+
+
+/* FIXME: may not need renameMapOutputForReduce() anymore? TEST!
+
+${local.dir}/usercache/$user/appcache/$appId/$contId/ == $cwd for tasks;
+contains task.sh script, which, when executed, creates symlinks and sets up env
+ "$local.dir"/usercache/$user/appcache/$appId/$contId/file.out
+ "$local.dir"/usercache/$user/appcache/$appId/$contId/file.out.idx (?)
+ "$local.dir"/usercache/$user/appcache/$appId/output/$taskId/ is where file.out* is moved after MapTask done
+
+ OHO! no further need for this at all? $taskId is unique per subtask
+ now => should work fine to leave alone. TODO: test with teragen or
+ similar
+ */
+
+ /**
+ * Within the _local_ filesystem (not HDFS), all activity takes place within
+ * a single subdir (${local.dir}/usercache/$user/appcache/$appId/$contId/),
+ * and all sub-MapTasks create the same filename ("file.out"). Rename that
+ * to something unique (e.g., "map_0.out") to avoid collisions.
+ *
+ * Longer-term, we'll modify [something] to use TaskAttemptID-based
+ * filenames instead of "file.out". (All of this is entirely internal,
+ * so there are no particular compatibility issues.)
+ */
+ private void renameMapOutputForReduce(JobConf conf, TaskAttemptId mapId,
+ MapOutputFile subMapOutputFile)
+ throws IOException {
+ FileSystem localFs = FileSystem.getLocal(conf);
+ // move map output to reduce input
+ Path mapOut = subMapOutputFile.getOutputFile();
+ Path reduceIn = subMapOutputFile.getInputFileForWrite(
+ TypeConverter.fromYarn(mapId).getTaskID(), localFs.getLength(mapOut));
+ if (!localFs.mkdirs(reduceIn.getParent())) {
+ throw new IOException("Mkdirs failed to create "
+ + reduceIn.getParent().toString());
+ }
+ if (!localFs.rename(mapOut, reduceIn))
+ throw new IOException("Couldn't rename " + mapOut);
+ }
+
+ /**
+ * Also within the local filesystem, we need to restore the initial state
+ * of the directory as much as possible. Compare current contents against
+ * the saved original state and nuke everything that doesn't belong, with
+ * the exception of the renamed map outputs (see above).
+FIXME: do we really need to worry about renamed map outputs, or already moved to output dir on commit? if latter, fix comment
+ *
+ * Any jobs that go out of their way to rename or delete things from the
+ * local directory are considered broken and deserve what they get...
+ */
+ private void relocalize() {
+ File[] curLocalFiles = curDir.listFiles();
+ for (int j = 0; j < curLocalFiles.length; ++j) {
+ if (!localizedFiles.contains(curLocalFiles[j])) {
+ // found one that wasn't there before: delete it
+ boolean deleted = false;
+ try {
+ if (curFC != null) {
+ // this is recursive, unlike File delete():
+ deleted = curFC.delete(new Path(curLocalFiles[j].getName()),true);
+ }
+ } catch (IOException e) {
+ deleted = false;
+ }
+ if (!deleted) {
+ LOG.warn("Unable to delete unexpected local file/dir "
+ + curLocalFiles[j].getName() + ": insufficient permissions?");
+ }
+ }
+ }
+ }
+
+ } // end SubtaskRunner
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
new file mode 100644
index 0000000..d9d5b1f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -0,0 +1,264 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Vector;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapred.TaskLog.LogName;
+import org.apache.hadoop.mapreduce.ID;
+import org.apache.hadoop.util.StringUtils;
+
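+/**
+ * Builds the environment variables and java command line used to launch
+ * the child task JVM (whose main class is YarnChild) inside a container.
+ */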
+public class MapReduceChildJVM {
+ private static final String SYSTEM_PATH_SEPARATOR =
+ System.getProperty("path.separator");
+
+ private static final Log LOG = LogFactory.getLog(MapReduceChildJVM.class);
+
+ private static File getTaskLogFile(String logDir, LogName filter) {
+ return new File(logDir, filter.toString());
+ }
+
+ private static String getChildEnv(JobConf jobConf, boolean isMap) {
+ if (isMap) {
+ return jobConf.get(JobConf.MAPRED_MAP_TASK_ENV,
+ jobConf.get(JobConf.MAPRED_TASK_ENV));
+ }
+ return jobConf.get(JobConf.MAPRED_REDUCE_TASK_ENV,
+        jobConf.get(JobConf.MAPRED_TASK_ENV));
+ }
+
+ public static void setVMEnv(Map<String, String> env,
+ List<String> classPaths, String pwd, String containerLogDir,
+ String nmLdLibraryPath, Task task, CharSequence applicationTokensFile) {
+
+ JobConf conf = task.conf;
+
+ // Add classpath.
+ CharSequence cp = env.get("CLASSPATH");
+ String classpath = StringUtils.join(SYSTEM_PATH_SEPARATOR, classPaths);
+ if (null == cp) {
+ env.put("CLASSPATH", classpath);
+ } else {
+ env.put("CLASSPATH", classpath + SYSTEM_PATH_SEPARATOR + cp);
+ }
+
+ /////// Environmental variable LD_LIBRARY_PATH
+ StringBuilder ldLibraryPath = new StringBuilder();
+
+ ldLibraryPath.append(nmLdLibraryPath);
+ ldLibraryPath.append(SYSTEM_PATH_SEPARATOR);
+ ldLibraryPath.append(pwd);
+ env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
+ /////// Environmental variable LD_LIBRARY_PATH
+
+ // for the child of task jvm, set hadoop.root.logger
+ env.put("HADOOP_ROOT_LOGGER", "DEBUG,CLA"); // TODO: Debug
+
+ // TODO: The following is useful for instance in streaming tasks. Should be
+ // set in ApplicationMaster's env by the RM.
+ String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS");
+ if (hadoopClientOpts == null) {
+ hadoopClientOpts = "";
+ } else {
+ hadoopClientOpts = hadoopClientOpts + " ";
+ }
+ // FIXME: don't think this is also needed given we already set java
+ // properties.
+ long logSize = TaskLog.getTaskLogLength(conf);
+ Vector<String> logProps = new Vector<String>(4);
+ setupLog4jProperties(logProps, logSize, containerLogDir);
+ Iterator<String> it = logProps.iterator();
+ StringBuffer buffer = new StringBuffer();
+ while (it.hasNext()) {
+ buffer.append(" " + it.next());
+ }
+ hadoopClientOpts = hadoopClientOpts + buffer.toString();
+
+ env.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
+
+ // add the env variables passed by the user
+ String mapredChildEnv = getChildEnv(conf, task.isMapTask());
+ if (mapredChildEnv != null && mapredChildEnv.length() > 0) {
+ String childEnvs[] = mapredChildEnv.split(",");
+ for (String cEnv : childEnvs) {
+ String[] parts = cEnv.split("="); // split on '='
+ String value = (String) env.get(parts[0]);
+ if (value != null) {
+ // replace $env with the child's env constructed by tt's
+ // example LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
+ value = parts[1].replace("$" + parts[0], value);
+ } else {
+ // this key is not configured by the tt for the child .. get it
+ // from the tt's env
+ // example PATH=$PATH:/tmp
+ value = System.getenv(parts[0]); // Get from NM?
+ if (value != null) {
+ // the env key is present in the tt's env
+ value = parts[1].replace("$" + parts[0], value);
+ } else {
+            // the env key is not present anywhere .. simply set it
+ // example X=$X:/tmp or X=/tmp
+ value = parts[1].replace("$" + parts[0], "");
+ }
+ }
+ env.put(parts[0], value);
+ }
+ }
+
+    // This should not be set here (if an OS check is required, move it to ContainerLaunch)
+ // env.put("JVM_PID", "`echo $$`");
+
+ env.put(Constants.STDOUT_LOGFILE_ENV,
+ getTaskLogFile(containerLogDir, TaskLog.LogName.STDOUT).toString());
+ env.put(Constants.STDERR_LOGFILE_ENV,
+ getTaskLogFile(containerLogDir, TaskLog.LogName.STDERR).toString());
+ }
+
+ private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
+ if (isMapTask) {
+ return jobConf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, jobConf.get(
+ JobConf.MAPRED_TASK_JAVA_OPTS,
+ JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
+ }
+ return jobConf
+ .get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, jobConf.get(
+ JobConf.MAPRED_TASK_JAVA_OPTS,
+ JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
+ }
+
+ private static void setupLog4jProperties(Vector<String> vargs,
+ long logSize, String containerLogDir) {
+ vargs.add("-Dlog4j.configuration=container-log4j.properties");
+ vargs.add("-Dhadoop.yarn.mr.containerLogDir=" + containerLogDir);
+ vargs.add("-Dhadoop.yarn.mr.totalLogFileSize=" + logSize);
+ }
+
+ public static List<String> getVMCommand(
+ InetSocketAddress taskAttemptListenerAddr, Task task, String javaHome,
+ String workDir, String logDir, String childTmpDir, ID jvmID) {
+
+ TaskAttemptID attemptID = task.getTaskID();
+ JobConf conf = task.conf;
+
+ Vector<String> vargs = new Vector<String>(8);
+
+ vargs.add("exec");
+ vargs.add(javaHome + "/bin/java");
+
+ // Add child (task) java-vm options.
+ //
+ // The following symbols if present in mapred.{map|reduce}.child.java.opts
+ // value are replaced:
+ // + @taskid@ is interpolated with value of TaskID.
+ // Other occurrences of @ will not be altered.
+ //
+ // Example with multiple arguments and substitutions, showing
+ // jvm GC logging, and start of a passwordless JVM JMX agent so can
+ // connect with jconsole and the likes to watch child memory, threads
+ // and get thread dumps.
+ //
+ // <property>
+ // <name>mapred.map.child.java.opts</name>
+    //    <value>-Xmx512M -verbose:gc -Xloggc:/tmp/@taskid@.gc \
+ // -Dcom.sun.management.jmxremote.authenticate=false \
+ // -Dcom.sun.management.jmxremote.ssl=false \
+ // </value>
+ // </property>
+ //
+ // <property>
+ // <name>mapred.reduce.child.java.opts</name>
+    //    <value>-Xmx1024M -verbose:gc -Xloggc:/tmp/@taskid@.gc \
+ // -Dcom.sun.management.jmxremote.authenticate=false \
+ // -Dcom.sun.management.jmxremote.ssl=false \
+ // </value>
+ // </property>
+ //
+ String javaOpts = getChildJavaOpts(conf, task.isMapTask());
+ javaOpts = javaOpts.replace("@taskid@", attemptID.toString());
+ String [] javaOptsSplit = javaOpts.split(" ");
+
+ // Add java.library.path; necessary for loading native libraries.
+ //
+    // 1. We add the 'cwd' of the task to its java.library.path to help
+ // users distribute native libraries via the DistributedCache.
+ // 2. The user can also specify extra paths to be added to the
+ // java.library.path via mapred.{map|reduce}.child.java.opts.
+ //
+ String libraryPath = workDir;
+ boolean hasUserLDPath = false;
+ for(int i=0; i<javaOptsSplit.length ;i++) {
+ if(javaOptsSplit[i].startsWith("-Djava.library.path=")) {
+ // TODO: Does the above take care of escaped space chars
+ javaOptsSplit[i] += SYSTEM_PATH_SEPARATOR + libraryPath;
+ hasUserLDPath = true;
+ break;
+ }
+ }
+ if(!hasUserLDPath) {
+ vargs.add("-Djava.library.path=" + libraryPath);
+ }
+ for (int i = 0; i < javaOptsSplit.length; i++) {
+ vargs.add(javaOptsSplit[i]);
+ }
+
+ if (childTmpDir != null) {
+ vargs.add("-Djava.io.tmpdir=" + childTmpDir);
+ }
+
+ // Setup the log4j prop
+ long logSize = TaskLog.getTaskLogLength(conf);
+ setupLog4jProperties(vargs, logSize, logDir);
+
+ if (conf.getProfileEnabled()) {
+ if (conf.getProfileTaskRange(task.isMapTask()
+ ).isIncluded(task.getPartition())) {
+ File prof = getTaskLogFile(logDir, TaskLog.LogName.PROFILE);
+ vargs.add(String.format(conf.getProfileParams(), prof.toString()));
+ }
+ }
+
+ // Add main class and its arguments
+ vargs.add(YarnChild.class.getName()); // main of Child
+ // pass TaskAttemptListener's address
+ vargs.add(taskAttemptListenerAddr.getAddress().getHostAddress());
+ vargs.add(Integer.toString(taskAttemptListenerAddr.getPort()));
+ vargs.add(attemptID.toString()); // pass task identifier
+
+ // Finally add the jvmID
+ vargs.add(String.valueOf(jvmID.getId()));
+    vargs.add("1>" + getTaskLogFile(logDir, TaskLog.LogName.STDOUT));
+    vargs.add("2>" + getTaskLogFile(logDir, TaskLog.LogName.STDERR));
+
+    // Final command
+ StringBuilder mergedCommand = new StringBuilder();
+ for (CharSequence str : vargs) {
+ mergedCommand.append(str).append(" ");
+ }
+ Vector<String> vargsFinal = new Vector<String>(1);
+ vargsFinal.add(mergedCommand.toString());
+ return vargsFinal;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapTaskAttemptImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapTaskAttemptImpl.java
new file mode 100644
index 0000000..a948604
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapTaskAttemptImpl.java
@@ -0,0 +1,65 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.event.EventHandler;
+
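+/**
+ * TaskAttemptImpl specialized for map tasks: holds the split meta-info and
+ * creates the classic MapTask that is shipped to the container to run.
+ */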
+public class MapTaskAttemptImpl extends TaskAttemptImpl {
+
+ private final TaskSplitMetaInfo splitInfo;
+
+ public MapTaskAttemptImpl(TaskId taskId, int attempt,
+ EventHandler eventHandler, Path jobFile,
+ int partition, TaskSplitMetaInfo splitInfo, Configuration conf,
+ TaskAttemptListener taskAttemptListener,
+ OutputCommitter committer, Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
+ super(taskId, attempt, eventHandler,
+ taskAttemptListener, jobFile, partition, conf, splitInfo.getLocations(),
+ committer, jobToken, fsTokens, clock);
+ this.splitInfo = splitInfo;
+ }
+
+ @Override
+ public Task createRemoteTask() {
+ //job file name is set in TaskAttempt, setting it null here
+ MapTask mapTask =
+ new MapTask("", TypeConverter.fromYarn(getID()), partition,
+ splitInfo.getSplitIndex(), 1); // YARN doesn't have the concept of slots per task, set it as 1.
+ mapTask.setUser(conf.get(MRJobConfig.USER_NAME));
+ mapTask.setConf(conf);
+ return mapTask;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ReduceTaskAttemptImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ReduceTaskAttemptImpl.java
new file mode 100644
index 0000000..1034a56
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ReduceTaskAttemptImpl.java
@@ -0,0 +1,64 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.event.EventHandler;
+
+
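+/**
+ * TaskAttemptImpl specialized for reduce tasks: records the number of map
+ * tasks and creates the classic ReduceTask that runs in the container.
+ */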
+public class ReduceTaskAttemptImpl extends TaskAttemptImpl {
+
+ private final int numMapTasks;
+
+ public ReduceTaskAttemptImpl(TaskId id, int attempt,
+ EventHandler eventHandler, Path jobFile, int partition,
+ int numMapTasks, Configuration conf,
+ TaskAttemptListener taskAttemptListener, OutputCommitter committer,
+ Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
+ super(id, attempt, eventHandler, taskAttemptListener, jobFile, partition,
+ conf, new String[] {}, committer, jobToken, fsTokens, clock);
+ this.numMapTasks = numMapTasks;
+ }
+
+ @Override
+ public Task createRemoteTask() {
+ //job file name is set in TaskAttempt, setting it null here
+ ReduceTask reduceTask =
+ new ReduceTask("", TypeConverter.fromYarn(getID()), partition,
+ numMapTasks, 1); // YARN doesn't have the concept of slots per task, set it as 1.
+ reduceTask.setUser(conf.get(MRJobConfig.USER_NAME));
+ reduceTask.setConf(conf);
+ return reduceTask;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
new file mode 100644
index 0000000..c12c60c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
@@ -0,0 +1,434 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RPC.Server;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.mapred.SortedRanges.Range;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.TaskHeartbeatHandler;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.service.CompositeService;
+
+/**
+ * This class is responsible for talking to the task umbilical.
+ * It also converts all the old data structures
+ * to yarn data structures.
+ *
+ * This class HAS to be in this package to access package private
+ * methods/classes.
+ */
+public class TaskAttemptListenerImpl extends CompositeService
+ implements TaskUmbilicalProtocol, TaskAttemptListener {
+
+ private static final Log LOG = LogFactory.getLog(TaskAttemptListenerImpl.class);
+
+ private AppContext context;
+ private Server server;
+ private TaskHeartbeatHandler taskHeartbeatHandler;
+ private InetSocketAddress address;
+ private Map<WrappedJvmID, org.apache.hadoop.mapred.Task> jvmIDToAttemptMap =
+ Collections.synchronizedMap(new HashMap<WrappedJvmID,
+ org.apache.hadoop.mapred.Task>());
+ private JobTokenSecretManager jobTokenSecretManager = null;
+
+ public TaskAttemptListenerImpl(AppContext context,
+ JobTokenSecretManager jobTokenSecretManager) {
+ super(TaskAttemptListenerImpl.class.getName());
+ this.context = context;
+ this.jobTokenSecretManager = jobTokenSecretManager;
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ registerHeartbeatHandler();
+ super.init(conf);
+ }
+
+ @Override
+ public void start() {
+ startRpcServer();
+ super.start();
+ }
+
+ protected void registerHeartbeatHandler() {
+ taskHeartbeatHandler = new TaskHeartbeatHandler(context.getEventHandler(),
+ context.getClock());
+ addService(taskHeartbeatHandler);
+ }
+
+ protected void startRpcServer() {
+ Configuration conf = getConfig();
+ try {
+ server =
+ RPC.getServer(TaskUmbilicalProtocol.class, this, "0.0.0.0", 0,
+ conf.getInt(AMConstants.AM_TASK_LISTENER_THREADS,
+ AMConstants.DEFAULT_AM_TASK_LISTENER_THREADS),
+ false, conf, jobTokenSecretManager);
+ server.start();
+ InetSocketAddress listenerAddress = server.getListenerAddress();
+ this.address =
+ NetUtils.createSocketAddr(listenerAddress.getAddress()
+ .getLocalHost().getCanonicalHostName()
+ + ":" + listenerAddress.getPort());
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+
+ @Override
+ public void stop() {
+ stopRpcServer();
+ super.stop();
+ }
+
+ protected void stopRpcServer() {
+ server.stop();
+ }
+
+ @Override
+ public InetSocketAddress getAddress() {
+ return address;
+ }
+
+ /**
+ * Child checking whether it can commit.
+ *
+ * <br/>
+ * Commit is a two-phased protocol. First the attempt informs the
+ * ApplicationMaster that it is
+ * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
+ * the ApplicationMaster via {@link #canCommit(TaskAttemptID)}. This is
+ * a legacy of the centralized commit-protocol handling in the JobTracker.
+ */
+ @Override
+ public boolean canCommit(TaskAttemptID taskAttemptID) throws IOException {
+ LOG.info("Commit go/no-go request from " + taskAttemptID.toString());
+ // An attempt is asking if it can commit its output. This can be decided
+ // only by the task which is managing the multiple attempts. So redirect the
+ // request there.
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+
+ taskHeartbeatHandler.receivedPing(attemptID);
+
+ Job job = context.getJob(attemptID.getTaskId().getJobId());
+ Task task = job.getTask(attemptID.getTaskId());
+ return task.canCommit(attemptID);
+ }
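+
+ // A rough sketch of the child-side handshake described above, for
+ // orientation only (the poll interval below is illustrative, not
+ // mandated by this class):
+ //
+ //   umbilical.commitPending(attemptId, status);  // announce commit_pending
+ //   while (!umbilical.canCommit(attemptId)) {    // poll the AM
+ //     Thread.sleep(1000);                        // hypothetical backoff
+ //   }
+ //   ...the attempt then promotes its output...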
+
+ /**
+ * TaskAttempt is reporting that it is in commit_pending and is waiting for
+ * the commit response.
+ *
+ * <br/>
+ * Commit is a two-phased protocol. First the attempt informs the
+ * ApplicationMaster that it is
+ * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
+ * the ApplicationMaster via {@link #canCommit(TaskAttemptID)}. This is
+ * a legacy of the centralized commit-protocol handling in the JobTracker.
+ */
+ @Override
+ public void commitPending(TaskAttemptID taskAttemptID, TaskStatus taskStatus)
+ throws IOException, InterruptedException {
+ LOG.info("Commit-pending state update from " + taskAttemptID.toString());
+ // An attempt is reporting that it is in the commit-pending state. Only
+ // the task managing the multiple attempts can act on this, so forward
+ // the report there as an event.
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+
+ taskHeartbeatHandler.receivedPing(attemptID);
+ //Ignorable TaskStatus? - since a task will send a LastStatusUpdate
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_COMMIT_PENDING));
+ }
+
+ @Override
+ public void done(TaskAttemptID taskAttemptID) throws IOException {
+ LOG.info("Done acknowledgement from " + taskAttemptID.toString());
+
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+
+ taskHeartbeatHandler.receivedPing(attemptID);
+
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
+ }
+
+ @Override
+ public void fatalError(TaskAttemptID taskAttemptID, String msg)
+ throws IOException {
+ // This happens only in Child and in the Task.
+ LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
+ reportDiagnosticInfo(taskAttemptID, "Error: " + msg);
+
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+ }
+
+ @Override
+ public void fsError(TaskAttemptID taskAttemptID, String message)
+ throws IOException {
+ // This happens only in Child.
+ LOG.fatal("Task: " + taskAttemptID + " - failed due to FSError: "
+ + message);
+ reportDiagnosticInfo(taskAttemptID, "FSError: " + message);
+
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+ }
+
+ @Override
+ public void shuffleError(TaskAttemptID taskAttemptID, String message) throws IOException {
+ // TODO: This isn't really used in any MR code. Ask for removal.
+ }
+
+ @Override
+ public MapTaskCompletionEventsUpdate getMapCompletionEvents(
+ JobID jobIdentifier, int fromEventId, int maxEvents,
+ TaskAttemptID taskAttemptID) throws IOException {
+ LOG.info("MapCompletionEvents request from " + taskAttemptID.toString()
+ + ". fromEventID " + fromEventId + " maxEvents " + maxEvents);
+
+ // TODO: shouldReset is never used. See TT. Ask for Removal.
+ boolean shouldReset = false;
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[] events =
+ context.getJob(attemptID.getTaskId().getJobId()).getTaskAttemptCompletionEvents(
+ fromEventId, maxEvents);
+
+ taskHeartbeatHandler.receivedPing(attemptID);
+
+ // filter the events to return only map completion events in old format
+ List<TaskCompletionEvent> mapEvents = new ArrayList<TaskCompletionEvent>();
+ for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent event : events) {
+ if (TaskType.MAP.equals(event.getAttemptId().getTaskId().getTaskType())) {
+ mapEvents.add(TypeConverter.fromYarn(event));
+ }
+ }
+
+ return new MapTaskCompletionEventsUpdate(
+ mapEvents.toArray(new TaskCompletionEvent[0]), shouldReset);
+ }
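+
+ // Callers (the reduce-side shuffle) typically page through these events
+ // with a sliding window: start at fromEventId = 0 and advance it by the
+ // number of events returned. That windowing policy lives in the caller,
+ // not here.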
+
+ @Override
+ public boolean ping(TaskAttemptID taskAttemptID) throws IOException {
+ LOG.info("Ping from " + taskAttemptID.toString());
+ taskHeartbeatHandler.receivedPing(TypeConverter.toYarn(taskAttemptID));
+ return true;
+ }
+
+ @Override
+ public void reportDiagnosticInfo(TaskAttemptID taskAttemptID, String diagnosticInfo)
+ throws IOException {
+ LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
+ + diagnosticInfo);
+
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ taskHeartbeatHandler.receivedPing(attemptID);
+
+ // This is mainly used for cases where we want to propagate exception traces
+ // of tasks that fail.
+
+ // This call exists as a hadoop mapreduce legacy wherein all changes in
+ // counters/progress/phase/output-size are reported through statusUpdate()
+ // call but not diagnosticInformation.
+ context.getEventHandler().handle(
+ new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
+ }
+
+ @Override
+ public boolean statusUpdate(TaskAttemptID taskAttemptID,
+ TaskStatus taskStatus) throws IOException, InterruptedException {
+ LOG.info("Status update from " + taskAttemptID.toString());
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ taskHeartbeatHandler.receivedPing(yarnAttemptID);
+ TaskAttemptStatus taskAttemptStatus =
+ new TaskAttemptStatus();
+ taskAttemptStatus.id = yarnAttemptID;
+ // Task sends the updated progress to the TT.
+ taskAttemptStatus.progress = taskStatus.getProgress();
+ LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : "
+ + taskStatus.getProgress());
+ // Task sends the diagnostic information to the TT
+ taskAttemptStatus.diagnosticInfo = taskStatus.getDiagnosticInfo();
+ // Task sends the updated state-string to the TT.
+ taskAttemptStatus.stateString = taskStatus.getStateString();
+ // Set the output-size when map-task finishes. Set by the task itself.
+ taskAttemptStatus.outputSize = taskStatus.getOutputSize();
+ // Task sends the updated phase to the TT.
+ taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
+ // Counters are updated by the task.
+ taskAttemptStatus.counters =
+ TypeConverter.toYarn(taskStatus.getCounters());
+
+ // Map Finish time set by the task (map only)
+ if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
+ taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
+ }
+
+ // Shuffle Finish time set by the task (reduce only).
+ if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
+ taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
+ }
+
+ // Sort finish time set by the task (reduce only).
+ if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
+ taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
+ }
+
+ // Not Setting the task state. Used by speculation - will be set in TaskAttemptImpl
+ //taskAttemptStatus.taskState = TypeConverter.toYarn(taskStatus.getRunState());
+
+ //set the fetch failures
+ if (taskStatus.getFetchFailedMaps() != null
+ && taskStatus.getFetchFailedMaps().size() > 0) {
+ taskAttemptStatus.fetchFailedMaps =
+ new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
+ for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
+ taskAttemptStatus.fetchFailedMaps.add(
+ TypeConverter.toYarn(failedMapId));
+ }
+ }
+
+ // Task sends the information about the nextRecordRange to the TT
+
+// TODO: The following are not needed here, but needed to be set somewhere inside AppMaster.
+// taskStatus.getRunState(); // Set by the TT/JT. Transform into a state TODO
+// taskStatus.getStartTime(); // Used to be set by the TaskTracker. This should be set by getTask().
+// taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when task finishes
+// // This was used by TT to do counter updates only once every minute. So this
+// // isn't ever changed by the Task itself.
+// taskStatus.getIncludeCounters();
+
+ context.getEventHandler().handle(
+ new TaskAttemptStatusUpdateEvent(taskAttemptStatus.id,
+ taskAttemptStatus));
+ return true;
+ }
+
+ @Override
+ public long getProtocolVersion(String arg0, long arg1) throws IOException {
+ return TaskUmbilicalProtocol.versionID;
+ }
+
+ @Override
+ public void reportNextRecordRange(TaskAttemptID taskAttemptID, Range range)
+ throws IOException {
+ // This is used when the feature of skipping records is enabled.
+
+ // This call exists as a hadoop mapreduce legacy wherein all changes in
+ // counters/progress/phase/output-size are reported through statusUpdate()
+ // call but not the next record range information.
+ throw new IOException("Not yet implemented.");
+ }
+
+ @Override
+ public JvmTask getTask(JvmContext context) throws IOException {
+
+ // A rough imitation of code from TaskTracker.
+
+ JVMId jvmId = context.jvmId;
+ LOG.info("JVM with ID : " + jvmId + " asked for a task");
+
+ // TODO: Is it an authorised container to get a task? Otherwise return null.
+
+ // TODO: Is the request for task-launch still valid?
+
+ // TODO: Child.java's firstTaskID isn't really firstTaskID. Ask for update
+ // to jobId and task-type.
+
+ WrappedJvmID wJvmID = new WrappedJvmID(jvmId.getJobId(), jvmId.isMap,
+ jvmId.getId());
+ org.apache.hadoop.mapred.Task task = jvmIDToAttemptMap.get(wJvmID);
+ if (task != null) { //there may be lag in the attempt getting added here
+ LOG.info("JVM with ID: " + jvmId + " given task: " + task.getTaskID());
+ JvmTask jvmTask = new JvmTask(task, false);
+
+ //remove the task as it is no more needed and free up the memory
+ jvmIDToAttemptMap.remove(wJvmID);
+
+ return jvmTask;
+ }
+ return null;
+ }
+
+ @Override
+ public void register(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID,
+ org.apache.hadoop.mapred.Task task, WrappedJvmID jvmID) {
+ //create the mapping so that it is easy to look up
+ //when it comes back to ask for Task.
+ jvmIDToAttemptMap.put(jvmID, task);
+ //register this attempt
+ taskHeartbeatHandler.register(attemptID);
+ }
+
+ @Override
+ public void unregister(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID,
+ WrappedJvmID jvmID) {
+ //remove the mapping if not already removed
+ jvmIDToAttemptMap.remove(jvmID);
+
+ //unregister this attempt
+ taskHeartbeatHandler.unregister(attemptID);
+ }
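+
+ // Expected lifecycle around the JVM-to-task map, as a sketch (the ordering
+ // is inferred from how this class is used, not enforced by it):
+ //   1. register(attemptID, task, jvmID)  - before the container JVM starts
+ //   2. getTask(jvmContext)               - the child polls until its task appears
+ //   3. unregister(attemptID, jvmID)      - once the attempt finishes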
+
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ return ProtocolSignature.getProtocolSignature(this,
+ protocol, clientVersion, clientMethodsHash);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedJvmID.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedJvmID.java
new file mode 100644
index 0000000..2a83a26
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedJvmID.java
@@ -0,0 +1,30 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+/**
+ * A simple wrapper that widens the visibility of {@link JVMId}.
+ */
+public class WrappedJvmID extends JVMId {
+
+ public WrappedJvmID(JobID jobID, boolean mapTask, int nextInt) {
+ super(jobID, mapTask, nextInt);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedPeriodicStatsAccumulator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedPeriodicStatsAccumulator.java
new file mode 100644
index 0000000..96c0bbe
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedPeriodicStatsAccumulator.java
@@ -0,0 +1,15 @@
+package org.apache.hadoop.mapred;
+
+// Workaround for PeriodicStatsAccumulator being package-private
+public class WrappedPeriodicStatsAccumulator {
+
+ private PeriodicStatsAccumulator real;
+
+ public WrappedPeriodicStatsAccumulator(PeriodicStatsAccumulator real) {
+ this.real = real;
+ }
+
+ public void extend(double newProgress, int newValue) {
+ real.extend(newProgress, newValue);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedProgressSplitsBlock.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedProgressSplitsBlock.java
new file mode 100644
index 0000000..4aa8c82
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedProgressSplitsBlock.java
@@ -0,0 +1,52 @@
+package org.apache.hadoop.mapred;
+
+// Workaround for ProgressSplitsBlock being package-private
+public class WrappedProgressSplitsBlock extends ProgressSplitsBlock {
+
+ public static final int DEFAULT_NUMBER_PROGRESS_SPLITS = 12;
+
+ private WrappedPeriodicStatsAccumulator wrappedProgressWallclockTime;
+ private WrappedPeriodicStatsAccumulator wrappedProgressCPUTime;
+ private WrappedPeriodicStatsAccumulator wrappedProgressVirtualMemoryKbytes;
+ private WrappedPeriodicStatsAccumulator wrappedProgressPhysicalMemoryKbytes;
+
+ public WrappedProgressSplitsBlock(int numberSplits) {
+ super(numberSplits);
+ }
+
+ public int[][] burst() {
+ return super.burst();
+ }
+
+ public WrappedPeriodicStatsAccumulator getProgressWallclockTime() {
+ if (wrappedProgressWallclockTime == null) {
+ wrappedProgressWallclockTime = new WrappedPeriodicStatsAccumulator(
+ progressWallclockTime);
+ }
+ return wrappedProgressWallclockTime;
+ }
+
+ public WrappedPeriodicStatsAccumulator getProgressCPUTime() {
+ if (wrappedProgressCPUTime == null) {
+ wrappedProgressCPUTime = new WrappedPeriodicStatsAccumulator(
+ progressCPUTime);
+ }
+ return wrappedProgressCPUTime;
+ }
+
+ public WrappedPeriodicStatsAccumulator getProgressVirtualMemoryKbytes() {
+ if (wrappedProgressVirtualMemoryKbytes == null) {
+ wrappedProgressVirtualMemoryKbytes = new WrappedPeriodicStatsAccumulator(
+ progressVirtualMemoryKbytes);
+ }
+ return wrappedProgressVirtualMemoryKbytes;
+ }
+
+ public WrappedPeriodicStatsAccumulator getProgressPhysicalMemoryKbytes() {
+ if (wrappedProgressPhysicalMemoryKbytes == null) {
+ wrappedProgressPhysicalMemoryKbytes = new WrappedPeriodicStatsAccumulator(
+ progressPhysicalMemoryKbytes);
+ }
+ return wrappedProgressPhysicalMemoryKbytes;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
new file mode 100644
index 0000000..3021004
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
@@ -0,0 +1,346 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSError;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.log4j.LogManager;
+
+/**
+ * The main() for MapReduce task processes.
+ */
+class YarnChild {
+
+ private static final Log LOG = LogFactory.getLog(YarnChild.class);
+
+ static volatile TaskAttemptID taskid = null;
+
+ public static void main(String[] args) throws Throwable {
+ LOG.debug("Child starting");
+
+ final JobConf defaultConf = new JobConf();
+ defaultConf.addResource(MRConstants.JOB_CONF_FILE);
+ UserGroupInformation.setConfiguration(defaultConf);
+
+ String host = args[0];
+ int port = Integer.parseInt(args[1]);
+ final InetSocketAddress address = new InetSocketAddress(host, port);
+ final TaskAttemptID firstTaskid = TaskAttemptID.forName(args[2]);
+ int jvmIdInt = Integer.parseInt(args[3]);
+ JVMId jvmId = new JVMId(firstTaskid.getJobID(),
+ firstTaskid.getTaskType() == TaskType.MAP, jvmIdInt);
+
+ // initialize metrics
+ DefaultMetricsSystem.initialize(
+ StringUtils.camelize(firstTaskid.getTaskType().name()) +"Task");
+
+ Token<JobTokenIdentifier> jt = loadCredentials(defaultConf, address);
+
+ // Create TaskUmbilicalProtocol as actual task owner.
+ UserGroupInformation taskOwner =
+ UserGroupInformation.createRemoteUser(firstTaskid.getJobID().toString());
+ taskOwner.addToken(jt);
+ final TaskUmbilicalProtocol umbilical =
+ taskOwner.doAs(new PrivilegedExceptionAction<TaskUmbilicalProtocol>() {
+ @Override
+ public TaskUmbilicalProtocol run() throws Exception {
+ return (TaskUmbilicalProtocol)RPC.getProxy(TaskUmbilicalProtocol.class,
+ TaskUmbilicalProtocol.versionID, address, defaultConf);
+ }
+ });
+
+ // report a placeholder (non-)pid to the application master
+ JvmContext context = new JvmContext(jvmId, "-1000");
+ LOG.debug("PID: " + System.getenv().get("JVM_PID"));
+ Task task = null;
+ UserGroupInformation childUGI = null;
+
+ try {
+ JvmTask myTask = null;
+ // poll for new task
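+ // Illustrative schedule: with idle = 0, 1, 2, 3, ... the sleeps are
+ // 0ms, 500ms, 1000ms, then a steady 1500ms until the AM returns a task.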
+ for (int idle = 0; null == myTask; ++idle) {
+ long sleepTimeMilliSecs = Math.min(idle * 500, 1500);
+ LOG.info("Task not yet available; sleeping for " + sleepTimeMilliSecs
+ + "ms before retrying.");
+ MILLISECONDS.sleep(sleepTimeMilliSecs);
+ myTask = umbilical.getTask(context);
+ }
+ if (myTask.shouldDie()) {
+ return;
+ }
+
+ task = myTask.getTask();
+ YarnChild.taskid = task.getTaskID();
+
+ // Create the job-conf and set credentials
+ final JobConf job =
+ configureTask(task, defaultConf.getCredentials(), jt);
+
+ // Initialize Java VM metrics
+ JvmMetrics.initSingleton(jvmId.toString(), job.getSessionId());
+ LOG.debug("Remote user: " + job.get("user.name"));
+ childUGI = UserGroupInformation.createRemoteUser(job.get("user.name"));
+ // Add tokens to new user so that it may execute its task correctly.
+ for(Token<?> token : UserGroupInformation.getCurrentUser().getTokens()) {
+ childUGI.addToken(token);
+ }
+
+ // Create a final reference to the task for the doAs block
+ final Task taskFinal = task;
+ childUGI.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // use job-specified working directory
+ FileSystem.get(job).setWorkingDirectory(job.getWorkingDirectory());
+ taskFinal.run(job, umbilical); // run the task
+ return null;
+ }
+ });
+ } catch (FSError e) {
+ LOG.fatal("FSError from child", e);
+ umbilical.fsError(taskid, e.getMessage());
+ } catch (Exception exception) {
+ LOG.warn("Exception running child : "
+ + StringUtils.stringifyException(exception));
+ try {
+ if (task != null) {
+ // do cleanup for the task
+ if (childUGI == null) { // no need to go into the doAs block
+ task.taskCleanup(umbilical);
+ } else {
+ final Task taskFinal = task;
+ childUGI.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ taskFinal.taskCleanup(umbilical);
+ return null;
+ }
+ });
+ }
+ }
+ } catch (Exception e) {
+ LOG.info("Exception cleaning up: " + StringUtils.stringifyException(e));
+ }
+ // Report back any failures, for diagnostic purposes
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ exception.printStackTrace(new PrintStream(baos));
+ if (taskid != null) {
+ umbilical.reportDiagnosticInfo(taskid, baos.toString());
+ }
+ } catch (Throwable throwable) {
+ LOG.fatal("Error running child : "
+ + StringUtils.stringifyException(throwable));
+ if (taskid != null) {
+ Throwable tCause = throwable.getCause();
+ String cause = tCause == null
+ ? throwable.getMessage()
+ : StringUtils.stringifyException(tCause);
+ umbilical.fatalError(taskid, cause);
+ }
+ } finally {
+ RPC.stopProxy(umbilical);
+ DefaultMetricsSystem.shutdown();
+ // Shutting down log4j of the child-vm...
+ // This assumes that on return from Task.run()
+ // there is no more logging done.
+ LogManager.shutdown();
+ }
+ }
+
+ private static Token<JobTokenIdentifier> loadCredentials(JobConf conf,
+ InetSocketAddress address) throws IOException {
+ //load token cache storage
+ String tokenFileLocation =
+ System.getenv(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME);
+ String jobTokenFile =
+ new Path(tokenFileLocation).makeQualified(FileSystem.getLocal(conf))
+ .toUri().getPath();
+ Credentials credentials =
+ TokenCache.loadTokens(jobTokenFile, conf);
+ LOG.debug("Loading token. # keys = " + credentials.numberOfSecretKeys()
+ + "; from file = " + jobTokenFile);
+ Token<JobTokenIdentifier> jt = TokenCache.getJobToken(credentials);
+ jt.setService(new Text(address.getAddress().getHostAddress() + ":"
+ + address.getPort()));
+ UserGroupInformation current = UserGroupInformation.getCurrentUser();
+ current.addToken(jt);
+ for (Token<? extends TokenIdentifier> tok : credentials.getAllTokens()) {
+ current.addToken(tok);
+ }
+ // Set the credentials
+ conf.setCredentials(credentials);
+ return jt;
+ }
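+
+ // Note: the job token's service is rewritten above to the AM's
+ // task-listener host:port, so that the umbilical RPC proxy created in
+ // main() selects this token when talking to that endpoint.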
+
+ /**
+ * Configure mapred-local dirs. This config is used by the task to find
+ * its output directory.
+ */
+ private static void configureLocalDirs(Task task, JobConf job) {
+ String[] localSysDirs = StringUtils.getTrimmedStrings(
+ System.getenv(ApplicationConstants.LOCAL_DIR_ENV));
+ job.setStrings(MRConfig.LOCAL_DIR, localSysDirs);
+ LOG.info(MRConfig.LOCAL_DIR + " for child: " + job.get(MRConfig.LOCAL_DIR));
+ }
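+
+ // LOCAL_DIR_ENV typically carries a comma-separated list of per-app
+ // directories, e.g. ".../usercache/<user>/appcache/<appId>" (illustrative),
+ // which becomes the task's MRConfig.LOCAL_DIR setting above.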
+
+ private static JobConf configureTask(Task task, Credentials credentials,
+ Token<JobTokenIdentifier> jt) throws IOException {
+ final JobConf job = new JobConf(MRConstants.JOB_CONF_FILE);
+ job.setCredentials(credentials);
+ // set tcp nodelay
+ job.setBoolean("ipc.client.tcpnodelay", true);
+ job.setClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
+ YarnOutputFiles.class, MapOutputFile.class);
+ // set the jobTokenFile into task
+ task.setJobTokenSecret(
+ JobTokenSecretManager.createSecretKey(jt.getPassword()));
+
+ // setup the child's MRConfig.LOCAL_DIR.
+ configureLocalDirs(task, job);
+
+ // setup the child's attempt directories
+ // Do the task-type specific localization
+ task.localizeConfiguration(job);
+
+ // Set up the DistributedCache related configs
+ setupDistributedCacheConfig(job);
+
+ // Overwrite the localized task jobconf which is linked to in the current
+ // work-dir.
+ Path localTaskFile = new Path(Constants.JOBFILE);
+ writeLocalJobFile(localTaskFile, job);
+ task.setJobFile(localTaskFile.toString());
+ task.setConf(job);
+ return job;
+ }
+
+ /**
+ * Set up the DistributedCache related configs to make
+ * {@link DistributedCache#getLocalCacheFiles(Configuration)}
+ * and
+ * {@link DistributedCache#getLocalCacheArchives(Configuration)}
+ * working.
+ * @param job
+ * @throws IOException
+ */
+ private static void setupDistributedCacheConfig(final JobConf job)
+ throws IOException {
+
+ // all symlinks are created in the current work-dir
+ String localWorkDir = System.getenv("PWD");
+
+ // Update the configuration object with localized archives.
+ URI[] cacheArchives = DistributedCache.getCacheArchives(job);
+ if (cacheArchives != null) {
+ List<String> localArchives = new ArrayList<String>();
+ for (int i = 0; i < cacheArchives.length; ++i) {
+ URI u = cacheArchives[i];
+ Path p = new Path(u);
+ Path name =
+ new Path((null == u.getFragment()) ? p.getName()
+ : u.getFragment());
+ String linkName = name.toUri().getPath();
+ localArchives.add(new Path(localWorkDir, linkName).toUri().getPath());
+ }
+ if (!localArchives.isEmpty()) {
+ job.set(MRJobConfig.CACHE_LOCALARCHIVES, StringUtils
+ .arrayToString(localArchives.toArray(new String[localArchives
+ .size()])));
+ }
+ }
+
+ // Update the configuration object with localized files.
+ URI[] cacheFiles = DistributedCache.getCacheFiles(job);
+ if (cacheFiles != null) {
+ List<String> localFiles = new ArrayList<String>();
+ for (int i = 0; i < cacheFiles.length; ++i) {
+ URI u = cacheFiles[i];
+ Path p = new Path(u);
+ Path name =
+ new Path((null == u.getFragment()) ? p.getName()
+ : u.getFragment());
+ String linkName = name.toUri().getPath();
+ localFiles.add(new Path(localWorkDir, linkName).toUri().getPath());
+ }
+ if (!localFiles.isEmpty()) {
+ job.set(MRJobConfig.CACHE_LOCALFILES,
+ StringUtils.arrayToString(localFiles
+ .toArray(new String[localFiles.size()])));
+ }
+ }
+ }
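+
+ // Example of the fragment-to-link mapping above (URIs hypothetical):
+ //   hdfs://nn:8020/cache/lib.jar             -> $PWD/lib.jar
+ //   hdfs://nn:8020/cache/lib.jar#renamed.jar -> $PWD/renamed.jar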
+
+ // 0640: owner read/write, group read-only
+ private static final FsPermission urw_gr =
+ FsPermission.createImmutable((short) 0640);
+
+ /**
+ * Write the task specific job-configuration file.
+ * @throws IOException
+ */
+ private static void writeLocalJobFile(Path jobFile, JobConf conf)
+ throws IOException {
+ FileSystem localFs = FileSystem.getLocal(conf);
+ localFs.delete(jobFile, true);
+ OutputStream out = null;
+ try {
+ out = FileSystem.create(localFs, jobFile, urw_gr);
+ conf.writeXml(out);
+ } finally {
+ IOUtils.cleanup(LOG, out);
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
new file mode 100644
index 0000000..e08e093
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
@@ -0,0 +1,238 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRConfig;
+
+/**
+ * Manipulate the working area for the transient store for maps and reduces.
+ *
+ * This class is used by map and reduce tasks to identify the directories that
+ * they need to write to/read from for intermediate files. The callers of
+ * these methods are from child space.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class YarnOutputFiles extends MapOutputFile {
+
+ private JobConf conf;
+
+ private static final String JOB_OUTPUT_DIR = "output";
+ private static final String SPILL_FILE_PATTERN = "%s_spill_%d.out";
+ private static final String SPILL_INDEX_FILE_PATTERN = SPILL_FILE_PATTERN
+ + ".index";
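+ // For example (attempt id hypothetical), spill 3 of
+ // attempt_1300000000000_0001_m_000001_0 is written as
+ //   attempt_1300000000000_0001_m_000001_0_spill_3.out
+ //   attempt_1300000000000_0001_m_000001_0_spill_3.out.index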
+
+ public YarnOutputFiles() {
+ }
+
+ // assume configured to $localdir/usercache/$user/appcache/$appId
+ private LocalDirAllocator lDirAlloc =
+ new LocalDirAllocator(MRConfig.LOCAL_DIR);
+
+ private Path getAttemptOutputDir() {
+ return new Path(JOB_OUTPUT_DIR, conf.get(JobContext.TASK_ATTEMPT_ID));
+ }
+
+ /**
+ * Return the path to the local map output file created earlier.
+ *
+ * @return path
+ * @throws IOException
+ */
+ public Path getOutputFile() throws IOException {
+ Path attemptOutput =
+ new Path(getAttemptOutputDir(), MAP_OUTPUT_FILENAME_STRING);
+ return lDirAlloc.getLocalPathToRead(attemptOutput.toString(), conf);
+ }
+
+ /**
+ * Create a local map output file name.
+ *
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ public Path getOutputFileForWrite(long size) throws IOException {
+ Path attemptOutput =
+ new Path(getAttemptOutputDir(), MAP_OUTPUT_FILENAME_STRING);
+ return lDirAlloc.getLocalPathForWrite(attemptOutput.toString(), size, conf);
+ }
+
+ /**
+ * Create a local map output file name on the same volume.
+ */
+ public Path getOutputFileForWriteInVolume(Path existing) {
+ Path outputDir = new Path(existing.getParent(), JOB_OUTPUT_DIR);
+ Path attemptOutputDir = new Path(outputDir,
+ conf.get(JobContext.TASK_ATTEMPT_ID));
+ return new Path(attemptOutputDir, MAP_OUTPUT_FILENAME_STRING);
+ }
+
+ /**
+ * Return the path to a local map output index file created earlier.
+ *
+ * @return path
+ * @throws IOException
+ */
+ public Path getOutputIndexFile() throws IOException {
+ Path attemptIndexOutput =
+ new Path(getAttemptOutputDir(), MAP_OUTPUT_FILENAME_STRING +
+ MAP_OUTPUT_INDEX_SUFFIX_STRING);
+ return lDirAlloc.getLocalPathToRead(attemptIndexOutput.toString(), conf);
+ }
+
+ /**
+ * Create a local map output index file name.
+ *
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ public Path getOutputIndexFileForWrite(long size) throws IOException {
+ Path attemptIndexOutput =
+ new Path(getAttemptOutputDir(), MAP_OUTPUT_FILENAME_STRING +
+ MAP_OUTPUT_INDEX_SUFFIX_STRING);
+ return lDirAlloc.getLocalPathForWrite(attemptIndexOutput.toString(),
+ size, conf);
+ }
+
+ /**
+ * Create a local map output index file name on the same volume.
+ */
+ public Path getOutputIndexFileForWriteInVolume(Path existing) {
+ Path outputDir = new Path(existing.getParent(), JOB_OUTPUT_DIR);
+ Path attemptOutputDir = new Path(outputDir,
+ conf.get(JobContext.TASK_ATTEMPT_ID));
+ return new Path(attemptOutputDir, MAP_OUTPUT_FILENAME_STRING +
+ MAP_OUTPUT_INDEX_SUFFIX_STRING);
+ }
+
+ /**
+ * Return a local map spill file created earlier.
+ *
+ * @param spillNumber the number
+ * @return path
+ * @throws IOException
+ */
+ public Path getSpillFile(int spillNumber) throws IOException {
+ return lDirAlloc.getLocalPathToRead(
+ String.format(SPILL_FILE_PATTERN,
+ conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), conf);
+ }
+
+ /**
+ * Create a local map spill file name.
+ *
+ * @param spillNumber the number
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ public Path getSpillFileForWrite(int spillNumber, long size)
+ throws IOException {
+ return lDirAlloc.getLocalPathForWrite(
+ String.format(SPILL_FILE_PATTERN,
+ conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), size, conf);
+ }
+
+ /**
+ * Return a local map spill index file created earlier.
+ *
+ * @param spillNumber the number
+ * @return path
+ * @throws IOException
+ */
+ public Path getSpillIndexFile(int spillNumber) throws IOException {
+ return lDirAlloc.getLocalPathToRead(
+ String.format(SPILL_INDEX_FILE_PATTERN,
+ conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), conf);
+ }
+
+ /**
+ * Create a local map spill index file name.
+ *
+ * @param spillNumber the number
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ public Path getSpillIndexFileForWrite(int spillNumber, long size)
+ throws IOException {
+ return lDirAlloc.getLocalPathForWrite(
+ String.format(SPILL_INDEX_FILE_PATTERN,
+ conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), size, conf);
+ }
+
+ /**
+ * Return a local reduce input file created earlier.
+ *
+ * @param mapId a map task id
+ * @return path
+ * @throws IOException
+ */
+ public Path getInputFile(int mapId) throws IOException {
+ throw new UnsupportedOperationException("Incompatible with LocalRunner");
+ }
+
+ /**
+ * Create a local reduce input file name.
+ *
+ * @param mapId a map task id
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ public Path getInputFileForWrite(org.apache.hadoop.mapreduce.TaskID mapId,
+ long size) throws IOException {
+ return lDirAlloc.getLocalPathForWrite(String.format(
+ REDUCE_INPUT_FILE_FORMAT_STRING,
+ getAttemptOutputDir().toString(), mapId.getId()),
+ size, conf);
+ }
+
+ /** Removes all of the files related to a task. */
+ public void removeAll() throws IOException {
+ throw new UnsupportedOperationException("Incompatible with LocalRunner");
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ if (conf instanceof JobConf) {
+ this.conf = (JobConf) conf;
+ } else {
+ this.conf = new JobConf(conf);
+ }
+ }
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEvent.java
new file mode 100644
index 0000000..e853b1c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEvent.java
@@ -0,0 +1,42 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class JobHistoryEvent extends AbstractEvent<EventType>{
+
+ private final JobId jobID;
+ private final HistoryEvent historyEvent;
+
+ public JobHistoryEvent(JobId jobID, HistoryEvent historyEvent) {
+ super(historyEvent.getEventType());
+ this.jobID = jobID;
+ this.historyEvent = historyEvent;
+ }
+
+ public JobId getJobID() {
+ return jobID;
+ }
+
+ public HistoryEvent getHistoryEvent() {
+ return historyEvent;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
new file mode 100644
index 0000000..27fd448
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -0,0 +1,675 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+/**
+ * The job history events get routed to this class. This class writes the job
+ * history events to the DFS, directly into a staging dir, and then moves them
+ * to a done-dir. The JobHistory implementation is in this package to access
+ * package-private classes.
+ */
+public class JobHistoryEventHandler extends AbstractService
+ implements EventHandler<JobHistoryEvent> {
+
+ private final AppContext context;
+ private final int startCount;
+
+ //TODO Does the FS object need to be different ?
+ private FileSystem stagingDirFS; // log Dir FileSystem
+ private FileSystem doneDirFS; // done Dir FileSystem
+
+ private Configuration conf;
+
+ private Path stagingDirPath = null;
+ private Path doneDirPrefixPath = null; // folder for completed jobs
+
+
+ private BlockingQueue<JobHistoryEvent> eventQueue =
+ new LinkedBlockingQueue<JobHistoryEvent>();
+ private Thread eventHandlingThread;
+ private volatile boolean stopped;
+ private final Object lock = new Object();
+
+ private static final Log LOG = LogFactory.getLog(
+ JobHistoryEventHandler.class);
+
+ private static final Map<JobId, MetaInfo> fileMap =
+ Collections.<JobId,MetaInfo>synchronizedMap(new HashMap<JobId,MetaInfo>());
+
+ public JobHistoryEventHandler(AppContext context, int startCount) {
+ super("JobHistoryEventHandler");
+ this.context = context;
+ this.startCount = startCount;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hadoop.yarn.service.AbstractService#init(org.apache.hadoop.conf.Configuration)
+ * Initializes the FileSystem and Path objects for the log and done directories.
+ * Creates these directories if they do not already exist.
+ */
+ @Override
+ public void init(Configuration conf) {
+
+ this.conf = conf;
+
+ String stagingDirStr = null;
+ String doneDirStr = null;
+ String userDoneDirStr = null;
+ try {
+ stagingDirStr = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf);
+ doneDirStr =
+ JobHistoryUtils.getConfiguredHistoryIntermediateDoneDirPrefix(conf);
+ userDoneDirStr =
+ JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
+ } catch (IOException e) {
+ LOG.error("Failed while getting the configured log directories", e);
+ throw new YarnException(e);
+ }
+
+ //Check for the existence of the history staging dir. Maybe create it.
+ try {
+ stagingDirPath =
+ FileSystem.get(conf).makeQualified(new Path(stagingDirStr));
+ stagingDirFS = FileSystem.get(stagingDirPath.toUri(), conf);
+ mkdir(stagingDirFS, stagingDirPath, new FsPermission(
+ JobHistoryUtils.HISTORY_STAGING_DIR_PERMISSIONS));
+ } catch (IOException e) {
+ LOG.error("Failed while checking for/creating history staging path: ["
+ + stagingDirPath + "]", e);
+ throw new YarnException(e);
+ }
+
+ //Check for the existence of intermediate done dir.
+ Path doneDirPath = null;
+ try {
+ doneDirPath = FileSystem.get(conf).makeQualified(new Path(doneDirStr));
+ doneDirFS = FileSystem.get(doneDirPath.toUri(), conf);
+ // This directory will be in a common location, or this may be a cluster
+ // meant for a single user. Creating based on the conf. Should ideally be
+ // created by the JobHistoryServer or as part of deployment.
+ if (!doneDirFS.exists(doneDirPath)) {
+ if (JobHistoryUtils.shouldCreateNonUserDirectory(conf)) {
+ LOG.info("Creating intermediate history logDir: ["
+ + doneDirPath
+ + "] based on conf. Should ideally be created by the JobHistoryServer: "
+ + JHConfig.CREATE_HISTORY_INTERMEDIATE_BASE_DIR_KEY);
+ mkdir(
+ doneDirFS,
+ doneDirPath,
+ new FsPermission(
+ JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS
+ .toShort()));
+ // TODO Temporary toShort till new FsPermission(FsPermissions)
+ // respects
+ // sticky
+ } else {
+ String message = "Not creating intermediate history logDir: ["
+ + doneDirPath
+ + "] based on conf: "
+ + JHConfig.CREATE_HISTORY_INTERMEDIATE_BASE_DIR_KEY
+ + ". Either set to true or pre-create this directory with appropriate permissions";
+ LOG.error(message);
+ throw new YarnException(message);
+ }
+ }
+ } catch (IOException e) {
+ LOG.error("Failed checking for the existence of history intermediate "
+ + "done directory: [" + doneDirPath + "]", e);
+ throw new YarnException(e);
+ }
+
+ //Check/create user directory under intermediate done dir.
+ try {
+ doneDirPrefixPath =
+ FileSystem.get(conf).makeQualified(new Path(userDoneDirStr));
+ mkdir(doneDirFS, doneDirPrefixPath, new FsPermission(
+ JobHistoryUtils.HISTORY_INTERMEDIATE_USER_DIR_PERMISSIONS));
+ } catch (IOException e) {
+ LOG.error("Error creating user intermediate history done directory: [ "
+ + doneDirPrefixPath + "]", e);
+ throw new YarnException(e);
+ }
+
+ super.init(conf);
+ }
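+
+ // Resulting layout, roughly (actual paths depend on configuration):
+ //   <staging-dir>/      - events and conf files written while the job runs
+ //   <done-dir>/<user>/  - history, conf and summary files land here when
+ //                         the job ends (under temporary names, then renamed)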
+
+ private void mkdir(FileSystem fs, Path path, FsPermission fsp)
+ throws IOException {
+ if (!fs.exists(path)) {
+ try {
+ fs.mkdirs(path, fsp);
+ FileStatus fsStatus = fs.getFileStatus(path);
+ LOG.info("Perms after creating " + fsStatus.getPermission().toShort()
+ + ", Expected: " + fsp.toShort());
+ if (fsStatus.getPermission().toShort() != fsp.toShort()) {
+ LOG.info("Explicitly setting permissions to : " + fsp.toShort()
+ + ", " + fsp);
+ fs.setPermission(path, fsp);
+ }
+ } catch (FileAlreadyExistsException e) {
+ LOG.info("Directory: [" + path + "] already exists.");
+ }
+ }
+ }
+
+ @Override
+ public void start() {
+ eventHandlingThread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ JobHistoryEvent event = null;
+ while (!stopped && !Thread.currentThread().isInterrupted()) {
+ try {
+ event = eventQueue.take();
+ } catch (InterruptedException e) {
+ LOG.info("EventQueue take interrupted. Returning");
+ return;
+ }
+ // An event has been removed from the queue; handle it. Anything
+ // left in the queue at shutdown is handled via stop().
+ // Clear the interrupt status before calling handleEvent and
+ // restore it afterwards if it was set.
+ // Interrupts received from other threads during handleEvent cannot
+ // be dealt with - Shell.runCommand() ignores them.
+ synchronized (lock) {
+ boolean isInterrupted = Thread.interrupted();
+ handleEvent(event);
+ if (isInterrupted) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+ }
+ });
+ eventHandlingThread.start();
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ LOG.info("Stopping JobHistoryEventHandler");
+ stopped = true;
+ //do not interrupt while event handling is in progress
+ synchronized(lock) {
+ eventHandlingThread.interrupt();
+ }
+
+ try {
+ eventHandlingThread.join();
+ } catch (InterruptedException ie) {
+ LOG.info("Interrupted while stopping", ie);
+ }
+ //write all the events remaining in queue
+ Iterator<JobHistoryEvent> it = eventQueue.iterator();
+ while(it.hasNext()) {
+ JobHistoryEvent ev = it.next();
+ LOG.info("In stop, writing event " + ev.getType());
+ handleEvent(ev);
+ }
+
+ //close all file handles
+ for (MetaInfo mi : fileMap.values()) {
+ try {
+ mi.closeWriter();
+ } catch (IOException e) {
+ LOG.info("Exception while closing file " + e.getMessage());
+ }
+ }
+ LOG.info("Stopped JobHistoryEventHandler. super.stop()");
+ super.stop();
+ }
+
+ /**
+ * Create an event writer for the Job represented by the jobID.
+ * Writes out the job configuration to the log directory.
+ * This should be the first call to history for a job.
+ *
+ * @param jobId the jobId.
+ * @throws IOException
+ */
+ protected void setupEventWriter(JobId jobId, JobSubmittedEvent jse)
+ throws IOException {
+ if (stagingDirPath == null) {
+ LOG.error("Log Directory is null, returning");
+ throw new IOException("Missing Log Directory for History");
+ }
+
+ MetaInfo oldFi = fileMap.get(jobId);
+ Configuration conf = getConfig();
+
+ long submitTime = oldFi == null ? jse.getSubmitTime() : oldFi
+ .getJobIndexInfo().getSubmitTime();
+
+ // TODO Ideally this should be written out to the job dir
+ // (.staging/jobid/files - RecoveryService will need to be patched)
+ Path historyFile = JobHistoryUtils.getStagingJobHistoryFile(
+ stagingDirPath, jobId, startCount);
+ String user = UserGroupInformation.getCurrentUser().getShortUserName();
+ if (user == null) {
+ throw new IOException(
+ "User is null while setting up jobhistory eventwriter");
+ }
+
+ String jobName = context.getJob(jobId).getName();
+ EventWriter writer = (oldFi == null) ? null : oldFi.writer;
+
+ if (writer == null) {
+ try {
+ FSDataOutputStream out = stagingDirFS.create(historyFile, true);
+ writer = new EventWriter(out);
+ LOG.info("Event Writer setup for JobId: " + jobId + ", File: "
+ + historyFile);
+ } catch (IOException ioe) {
+ LOG.info("Could not create log file: [" + historyFile + "] for job "
+ + "[" + jobName + "]");
+ throw ioe;
+ }
+ }
+
+ Path logDirConfPath = null;
+ if (conf != null) {
+ // TODO Ideally this should be written out to the job dir
+ // (.staging/jobid/files - RecoveryService will need to be patched)
+ logDirConfPath = JobHistoryUtils.getStagingConfFile(stagingDirPath, jobId,
+ startCount);
+ FSDataOutputStream jobFileOut = null;
+ try {
+ if (logDirConfPath != null) {
+ jobFileOut = stagingDirFS.create(logDirConfPath, true);
+ conf.writeXml(jobFileOut);
+ jobFileOut.close();
+ }
+ } catch (IOException e) {
+ LOG.info("Failed to write the job configuration file", e);
+ throw e;
+ }
+ }
+
+ MetaInfo fi = new MetaInfo(historyFile, logDirConfPath, writer, submitTime,
+ user, jobName, jobId);
+ fi.getJobSummary().setJobId(jobId);
+ fi.getJobSummary().setJobSubmitTime(submitTime);
+ fileMap.put(jobId, fi);
+ }
+
+ /** Close the event writer for this id
+ * @throws IOException */
+ public void closeWriter(JobId id) throws IOException {
+ try {
+ final MetaInfo mi = fileMap.get(id);
+ if (mi != null) {
+ mi.closeWriter();
+ }
+
+ } catch (IOException e) {
+ LOG.error("Error closing writer for JobID: " + id);
+ throw e;
+ }
+ }
+
+ @Override
+ public void handle(JobHistoryEvent event) {
+ try {
+ eventQueue.put(event);
+ } catch (InterruptedException e) {
+ throw new YarnException(e);
+ }
+ }
+
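+ // How handleEvent routes things, in outline (a reading aid, not extra
+ // behavior):
+ //   JOB_SUBMITTED              -> set up the event writer for the job
+ //   every event                -> write it out and fold it into JobSummary
+ //   JOB_FINISHED/FAILED/KILLED -> fill in the job-index info, close the
+ //                                 writer and move the files to the done-dir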
+ protected void handleEvent(JobHistoryEvent event) {
+ synchronized (lock) {
+
+ // If this is a JOB_SUBMITTED event, set up the writer
+ if (event.getHistoryEvent().getEventType() == EventType.JOB_SUBMITTED) {
+ try {
+ JobSubmittedEvent jobSubmittedEvent =
+ (JobSubmittedEvent) event.getHistoryEvent();
+ setupEventWriter(event.getJobID(), jobSubmittedEvent);
+ } catch (IOException ioe) {
+ LOG.error("Error JobHistoryEventHandler in handleEvent: " + event,
+ ioe);
+ throw new YarnException(ioe);
+ }
+ }
+
+ // For all events
+ // (1) Write it out
+ // (2) Process it for JobSummary
+ MetaInfo mi = fileMap.get(event.getJobID());
+ try {
+ HistoryEvent historyEvent = event.getHistoryEvent();
+ mi.writeEvent(historyEvent);
+ processEventForJobSummary(event.getHistoryEvent(), mi.getJobSummary(), event.getJobID());
+ LOG.info("In HistoryEventHandler "
+ + event.getHistoryEvent().getEventType());
+ } catch (IOException e) {
+ LOG.error("Error writing History Event: " + event.getHistoryEvent(),
+ e);
+ throw new YarnException(e);
+ }
+
+ // If this is JobFinishedEvent, close the writer and setup the job-index
+ if (event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) {
+ try {
+ JobFinishedEvent jFinishedEvent =
+ (JobFinishedEvent) event.getHistoryEvent();
+ mi.getJobIndexInfo().setFinishTime(jFinishedEvent.getFinishTime());
+ mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getFinishedMaps());
+ mi.getJobIndexInfo().setNumReduces(
+ jFinishedEvent.getFinishedReduces());
+ mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString());
+ closeEventWriter(event.getJobID());
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+
+ if (event.getHistoryEvent().getEventType() == EventType.JOB_FAILED
+ || event.getHistoryEvent().getEventType() == EventType.JOB_KILLED) {
+ try {
+ JobUnsuccessfulCompletionEvent jucEvent = (JobUnsuccessfulCompletionEvent) event
+ .getHistoryEvent();
+ mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
+ mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps());
+ mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces());
+ mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
+ closeEventWriter(event.getJobID());
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+ }
+ }
+
+ private void processEventForJobSummary(HistoryEvent event, JobSummary summary, JobId jobId) {
+ // context.getJob could be used for some of this info as well.
+ switch (event.getEventType()) {
+ case JOB_SUBMITTED:
+ JobSubmittedEvent jse = (JobSubmittedEvent) event;
+ summary.setUser(jse.getUserName());
+ summary.setQueue(jse.getJobQueueName());
+ break;
+ case JOB_INITED:
+ JobInitedEvent jie = (JobInitedEvent) event;
+ summary.setJobLaunchTime(jie.getLaunchTime());
+ break;
+ case MAP_ATTEMPT_STARTED:
+ TaskAttemptStartedEvent mtase = (TaskAttemptStartedEvent) event;
+ if (summary.getFirstMapTaskLaunchTime() == 0)
+ summary.setFirstMapTaskLaunchTime(mtase.getStartTime());
+ break;
+ case REDUCE_ATTEMPT_STARTED:
+ TaskAttemptStartedEvent rtase = (TaskAttemptStartedEvent) event;
+ if (summary.getFirstReduceTaskLaunchTime() == 0)
+ summary.setFirstReduceTaskLaunchTime(rtase.getStartTime());
+ break;
+ case JOB_FINISHED:
+ JobFinishedEvent jfe = (JobFinishedEvent) event;
+ summary.setJobFinishTime(jfe.getFinishTime());
+ summary.setNumFinishedMaps(jfe.getFinishedMaps());
+ summary.setNumFailedMaps(jfe.getFailedMaps());
+ summary.setNumFinishedReduces(jfe.getFinishedReduces());
+ summary.setNumFailedReduces(jfe.getFailedReduces());
+ if (summary.getJobStatus() == null)
+ summary
+ .setJobStatus(org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED
+ .toString());
+ // TODO JOB_FINISHED does not have state. Effectively job history does not
+ // have state about the finished job.
+ setSummarySlotSeconds(summary, jobId);
+ break;
+ case JOB_FAILED:
+ case JOB_KILLED:
+ JobUnsuccessfulCompletionEvent juce = (JobUnsuccessfulCompletionEvent) event;
+ summary.setJobStatus(juce.getStatus());
+ summary.setNumFinishedMaps(context.getJob(jobId).getTotalMaps());
+ summary.setNumFinishedReduces(context.getJob(jobId).getTotalReduces());
+ summary.setJobFinishTime(juce.getFinishTime());
+ setSummarySlotSeconds(summary, jobId);
+ break;
+ }
+ }
+
+ private void setSummarySlotSeconds(JobSummary summary, JobId jobId) {
+ Counter slotMillisMapCounter =
+ context.getJob(jobId).getCounters()
+ .getCounter(JobCounter.SLOTS_MILLIS_MAPS);
+ if (slotMillisMapCounter != null) {
+ summary.setMapSlotSeconds(slotMillisMapCounter.getValue());
+ }
+ Counter slotMillisReduceCounter =
+ context.getJob(jobId).getCounters()
+ .getCounter(JobCounter.SLOTS_MILLIS_REDUCES);
+ if (slotMillisReduceCounter != null) {
+ summary.setReduceSlotSeconds(slotMillisReduceCounter.getValue());
+ }
+ }
+
+ protected void closeEventWriter(JobId jobId) throws IOException {
+
+ final MetaInfo mi = fileMap.get(jobId);
+ if (mi == null) {
+ throw new IOException("No MetaInfo found for JobId: [" + jobId + "]");
+ }
+
+ if (!mi.isWriterActive()) {
+ throw new IOException(
+ "Inactive Writer: Likely received multiple JobFinished / JobUnsuccessful events for JobId: ["
+ + jobId + "]");
+ }
+
+ // Close the Writer
+ try {
+ mi.closeWriter();
+ } catch (IOException e) {
+ LOG.error("Error closing writer for JobID: " + jobId);
+ throw e;
+ }
+
+ if (mi.getHistoryFile() == null) {
+ LOG.warn("No job-history file for " + jobId + " found in cache!");
+ }
+ if (mi.getConfFile() == null) {
+ LOG.warn("No jobconf file for " + jobId + " found in cache!");
+ }
+
+ // Writing out the summary file.
+ // TODO JH enhancement - reuse this file to store additional indexing info
+ // like ACLs, etc. JHServer can use HDFS append to build an index file
+ // with more info than is available via the filename.
+ Path qualifiedSummaryDoneFile = null;
+ FSDataOutputStream summaryFileOut = null;
+ try {
+ String doneSummaryFileName = getTempFileName(JobHistoryUtils
+ .getIntermediateSummaryFileName(jobId));
+ qualifiedSummaryDoneFile = doneDirFS.makeQualified(new Path(
+ doneDirPrefixPath, doneSummaryFileName));
+ summaryFileOut = doneDirFS.create(qualifiedSummaryDoneFile, true);
+ summaryFileOut.writeUTF(mi.getJobSummary().getJobSummaryString());
+ summaryFileOut.close();
+ } catch (IOException e) {
+      LOG.error("Unable to write out JobSummaryInfo to ["
+          + qualifiedSummaryDoneFile + "]", e);
+ throw e;
+ }
+
+ try {
+
+ // Move historyFile to Done Folder.
+ Path qualifiedDoneFile = null;
+ if (mi.getHistoryFile() != null) {
+ Path historyFile = mi.getHistoryFile();
+ Path qualifiedLogFile = stagingDirFS.makeQualified(historyFile);
+ String doneJobHistoryFileName =
+ getTempFileName(FileNameIndexUtils.getDoneFileName(mi
+ .getJobIndexInfo()));
+ qualifiedDoneFile =
+ doneDirFS.makeQualified(new Path(doneDirPrefixPath,
+ doneJobHistoryFileName));
+ moveToDoneNow(qualifiedLogFile, qualifiedDoneFile);
+ }
+
+ // Move confFile to Done Folder
+ Path qualifiedConfDoneFile = null;
+ if (mi.getConfFile() != null) {
+ Path confFile = mi.getConfFile();
+ Path qualifiedConfFile = stagingDirFS.makeQualified(confFile);
+ String doneConfFileName =
+ getTempFileName(JobHistoryUtils
+ .getIntermediateConfFileName(jobId));
+ qualifiedConfDoneFile =
+ doneDirFS.makeQualified(new Path(doneDirPrefixPath,
+ doneConfFileName));
+ moveToDoneNow(qualifiedConfFile, qualifiedConfDoneFile);
+ }
+
+ moveTmpToDone(qualifiedSummaryDoneFile);
+ moveTmpToDone(qualifiedConfDoneFile);
+ moveTmpToDone(qualifiedDoneFile);
+
+ } catch (IOException e) {
+      LOG.error("Error moving history files to the done folder for JobID: " + jobId);
+ throw e;
+ }
+ }
+
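+  // Per-job bookkeeping: the staged history/conf file paths, the active event
+  // writer, and the index and summary info accumulated while the job runs.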
+ private class MetaInfo {
+ private Path historyFile;
+ private Path confFile;
+ private EventWriter writer;
+ JobIndexInfo jobIndexInfo;
+ JobSummary jobSummary;
+
+ MetaInfo(Path historyFile, Path conf, EventWriter writer, long submitTime,
+ String user, String jobName, JobId jobId) {
+ this.historyFile = historyFile;
+ this.confFile = conf;
+ this.writer = writer;
+ this.jobIndexInfo = new JobIndexInfo(submitTime, -1, user, jobName, jobId, -1, -1, null);
+ this.jobSummary = new JobSummary();
+ }
+
+    Path getHistoryFile() { return historyFile; }
+
+    Path getConfFile() { return confFile; }
+
+    JobIndexInfo getJobIndexInfo() { return jobIndexInfo; }
+
+    JobSummary getJobSummary() { return jobSummary; }
+
+    boolean isWriterActive() { return writer != null; }
+
+ void closeWriter() throws IOException {
+ synchronized (lock) {
+ if (writer != null) {
+ writer.close();
+ }
+ writer = null;
+ }
+ }
+
+ void writeEvent(HistoryEvent event) throws IOException {
+ synchronized (lock) {
+ if (writer != null) {
+ writer.write(event);
+ writer.flush();
+ }
+ }
+ }
+ }
+
+ private void moveTmpToDone(Path tmpPath) throws IOException {
+ if (tmpPath != null) {
+ String tmpFileName = tmpPath.getName();
+ String fileName = getFileNameFromTmpFN(tmpFileName);
+ Path path = new Path(tmpPath.getParent(), fileName);
+ doneDirFS.rename(tmpPath, path);
+ LOG.info("Moved tmp to done: " + tmpPath + " to " + path);
+ }
+ }
+
+ // TODO If the FS objects are the same, this should be a rename instead of a
+ // copy.
+ private void moveToDoneNow(Path fromPath, Path toPath) throws IOException {
+ // check if path exists, in case of retries it may not exist
+ if (stagingDirFS.exists(fromPath)) {
+ LOG.info("Moving " + fromPath.toString() + " to " + toPath.toString());
+ // TODO temporarily removing the existing dst
+ if (doneDirFS.exists(toPath)) {
+ doneDirFS.delete(toPath, true);
+ }
+ boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath,
+ false, conf);
+
+      if (copied)
+        LOG.info("Copied to done location: " + toPath);
+      else
+        LOG.error("Copy failed from " + fromPath + " to " + toPath);
+ doneDirFS.setPermission(toPath, new FsPermission(
+ JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS));
+
+ stagingDirFS.delete(fromPath, false);
+ }
+ }
+
+ boolean pathExists(FileSystem fileSys, Path path) throws IOException {
+ return fileSys.exists(path);
+ }
+
+ private String getTempFileName(String srcFile) {
+ return srcFile + "_tmp";
+ }
+
+  private String getFileNameFromTmpFN(String tmpFileName) {
+    // TODO: add some error checking here.
+    // Strip the trailing "_tmp" suffix (4 characters) added by getTempFileName().
+    return tmpFileName.substring(0, tmpFileName.length() - 4);
+  }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
new file mode 100644
index 0000000..99eb351
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
@@ -0,0 +1,231 @@
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.util.StringUtils;
+
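+// Accumulates per-job statistics from job history events; rendered as a
+// single escaped key=value line by getJobSummaryString().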
+public class JobSummary {
+ private JobId jobId;
+ private long jobSubmitTime;
+ private long jobLaunchTime;
+  private long firstMapTaskLaunchTime; // MapAttemptStarted |
+                                       // TaskAttemptStartedEvent
+  private long firstReduceTaskLaunchTime; // ReduceAttemptStarted |
+                                          // TaskAttemptStartedEvent
+ private long jobFinishTime;
+ private int numFinishedMaps;
+ private int numFailedMaps;
+ private int numFinishedReduces;
+ private int numFailedReduces;
+ // private int numSlotsPerMap; | Doesn't make sense with potentially different
+ // resource models
+ // private int numSlotsPerReduce; | Doesn't make sense with potentially
+ // different resource models
+ private String user;
+ private String queue;
+ private String jobStatus;
+  private long mapSlotSeconds; // TODO Not generated yet in MRV2
+  private long reduceSlotSeconds; // TODO Not generated yet in MRV2
+ // private int clusterSlotCapacity;
+
+ JobSummary() {
+ }
+
+ public JobId getJobId() {
+ return jobId;
+ }
+
+ public void setJobId(JobId jobId) {
+ this.jobId = jobId;
+ }
+
+ public long getJobSubmitTime() {
+ return jobSubmitTime;
+ }
+
+ public void setJobSubmitTime(long jobSubmitTime) {
+ this.jobSubmitTime = jobSubmitTime;
+ }
+
+ public long getJobLaunchTime() {
+ return jobLaunchTime;
+ }
+
+ public void setJobLaunchTime(long jobLaunchTime) {
+ this.jobLaunchTime = jobLaunchTime;
+ }
+
+ public long getFirstMapTaskLaunchTime() {
+ return firstMapTaskLaunchTime;
+ }
+
+ public void setFirstMapTaskLaunchTime(long firstMapTaskLaunchTime) {
+ this.firstMapTaskLaunchTime = firstMapTaskLaunchTime;
+ }
+
+ public long getFirstReduceTaskLaunchTime() {
+ return firstReduceTaskLaunchTime;
+ }
+
+ public void setFirstReduceTaskLaunchTime(long firstReduceTaskLaunchTime) {
+ this.firstReduceTaskLaunchTime = firstReduceTaskLaunchTime;
+ }
+
+ public long getJobFinishTime() {
+ return jobFinishTime;
+ }
+
+ public void setJobFinishTime(long jobFinishTime) {
+ this.jobFinishTime = jobFinishTime;
+ }
+
+ public int getNumFinishedMaps() {
+ return numFinishedMaps;
+ }
+
+ public void setNumFinishedMaps(int numFinishedMaps) {
+ this.numFinishedMaps = numFinishedMaps;
+ }
+
+ public int getNumFailedMaps() {
+ return numFailedMaps;
+ }
+
+ public void setNumFailedMaps(int numFailedMaps) {
+ this.numFailedMaps = numFailedMaps;
+ }
+
+ // public int getNumSlotsPerMap() {
+ // return numSlotsPerMap;
+ // }
+ //
+ // public void setNumSlotsPerMap(int numSlotsPerMap) {
+ // this.numSlotsPerMap = numSlotsPerMap;
+ // }
+
+ public int getNumFinishedReduces() {
+ return numFinishedReduces;
+ }
+
+ public void setNumFinishedReduces(int numFinishedReduces) {
+ this.numFinishedReduces = numFinishedReduces;
+ }
+
+ public int getNumFailedReduces() {
+ return numFailedReduces;
+ }
+
+ public void setNumFailedReduces(int numFailedReduces) {
+ this.numFailedReduces = numFailedReduces;
+ }
+
+ // public int getNumSlotsPerReduce() {
+ // return numSlotsPerReduce;
+ // }
+ //
+ // public void setNumSlotsPerReduce(int numSlotsPerReduce) {
+ // this.numSlotsPerReduce = numSlotsPerReduce;
+ // }
+
+ public String getUser() {
+ return user;
+ }
+
+ public void setUser(String user) {
+ this.user = user;
+ }
+
+ public String getQueue() {
+ return queue;
+ }
+
+ public void setQueue(String queue) {
+ this.queue = queue;
+ }
+
+ public String getJobStatus() {
+ return jobStatus;
+ }
+
+ public void setJobStatus(String jobStatus) {
+ this.jobStatus = jobStatus;
+ }
+
+ public long getMapSlotSeconds() {
+ return mapSlotSeconds;
+ }
+
+ public void setMapSlotSeconds(long mapSlotSeconds) {
+ this.mapSlotSeconds = mapSlotSeconds;
+ }
+
+ public long getReduceSlotSeconds() {
+ return reduceSlotSeconds;
+ }
+
+ public void setReduceSlotSeconds(long reduceSlotSeconds) {
+ this.reduceSlotSeconds = reduceSlotSeconds;
+ }
+
+ // public int getClusterSlotCapacity() {
+ // return clusterSlotCapacity;
+ // }
+ //
+ // public void setClusterSlotCapacity(int clusterSlotCapacity) {
+ // this.clusterSlotCapacity = clusterSlotCapacity;
+ // }
+
+ public String getJobSummaryString() {
+ SummaryBuilder summary = new SummaryBuilder()
+ .add("jobId", jobId)
+ .add("submitTime", jobSubmitTime)
+ .add("launchTime", jobLaunchTime)
+ .add("firstMapTaskLaunchTime", firstMapTaskLaunchTime)
+ .add("firstReduceTaskLaunchTime", firstReduceTaskLaunchTime)
+ .add("finishTime", jobFinishTime)
+ .add("numMaps", numFinishedMaps + numFailedMaps)
+ .add("numReduces", numFinishedReduces + numFailedReduces)
+ .add("user", user)
+ .add("queue", queue)
+ .add("status", jobStatus)
+ .add("mapSlotSeconds", mapSlotSeconds)
+ .add("reduceSlotSeconds", reduceSlotSeconds);
+ return summary.toString();
+ }
+
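+  // For illustration only: getJobSummaryString() renders a single line like
+  // (all values below are hypothetical)
+  //   jobId=job_1234567890123_0001,submitTime=100,launchTime=200,...,
+  //   numMaps=10,numReduces=1,user=alice,queue=default,status=SUCCEEDED,...
+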
+ static final char EQUALS = '=';
+ static final char[] charsToEscape = { StringUtils.COMMA, EQUALS,
+ StringUtils.ESCAPE_CHAR };
+
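+  // Values added via the generic add() below are escaped with
+  // StringUtils.escapeString so embedded commas, '=' and escape characters
+  // don't corrupt the key=value,key=value format.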
+ static class SummaryBuilder {
+ final StringBuilder buffer = new StringBuilder();
+
+ // A little optimization for a very common case
+ SummaryBuilder add(String key, long value) {
+ return _add(key, Long.toString(value));
+ }
+
+ <T> SummaryBuilder add(String key, T value) {
+ return _add(key, StringUtils.escapeString(String.valueOf(value),
+ StringUtils.ESCAPE_CHAR, charsToEscape));
+ }
+
+ SummaryBuilder add(SummaryBuilder summary) {
+ if (buffer.length() > 0)
+ buffer.append(StringUtils.COMMA);
+ buffer.append(summary.buffer);
+ return this;
+ }
+
+ SummaryBuilder _add(String key, String value) {
+ if (buffer.length() > 0)
+ buffer.append(StringUtils.COMMA);
+ buffer.append(key).append(EQUALS).append(value);
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return buffer.toString();
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AMConstants.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AMConstants.java
new file mode 100644
index 0000000..fbe3037
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AMConstants.java
@@ -0,0 +1,74 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+
+public interface AMConstants {
+
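+  // Configuration keys (and their defaults) consumed by the MR Application
+  // Master; most keys are composed from MRConstants.YARN_MR_PREFIX.
+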
+ public static final String CONTAINERLAUNCHER_THREADPOOL_SIZE =
+ "yarn.mapreduce.containerlauncher.threadpool-size";
+
+ public static final String AM_RM_SCHEDULE_INTERVAL =
+ "yarn.appMaster.scheduler.interval";
+
+ public static final int DEFAULT_AM_RM_SCHEDULE_INTERVAL = 2000;
+
+ public static final String AM_TASK_LISTENER_THREADS =
+ MRConstants.YARN_MR_PREFIX + "task.listener.threads";
+
+ public static final int DEFAULT_AM_TASK_LISTENER_THREADS = 10;
+
+ public static final String AM_JOB_CLIENT_THREADS =
+ MRConstants.YARN_MR_PREFIX + "job.client.threads";
+
+ public static final int DEFAULT_AM_JOB_CLIENT_THREADS = 1;
+
+ public static final String SPECULATOR_CLASS =
+ MRConstants.YARN_MR_PREFIX + "speculator.class";
+
+ public static final String TASK_RUNTIME_ESTIMATOR_CLASS =
+ MRConstants.YARN_MR_PREFIX + "task.runtime.estimator.class";
+
+ public static final String TASK_ATTEMPT_PROGRESS_RUNTIME_LINEARIZER_CLASS =
+ MRConstants.YARN_MR_PREFIX + "task.runtime.linearizer.class";
+
+ public static final String EXPONENTIAL_SMOOTHING_LAMBDA_MILLISECONDS =
+ MRConstants.YARN_MR_PREFIX
+ + "task.runtime.estimator.exponential.smooth.lambda";
+
+ public static final String EXPONENTIAL_SMOOTHING_SMOOTH_RATE =
+ MRConstants.YARN_MR_PREFIX
+ + "task.runtime.estimator.exponential.smooth.smoothsrate";
+
+ public static final String RECOVERY_ENABLE = MRConstants.YARN_MR_PREFIX
+ + "recovery.enable";
+
+ public static final float DEFAULT_REDUCE_RAMP_UP_LIMIT = 0.5f;
+ public static final String REDUCE_RAMPUP_UP_LIMIT = MRConstants.YARN_MR_PREFIX
+ + "reduce.rampup.limit";
+
+ public static final float DEFAULT_REDUCE_PREEMPTION_LIMIT = 0.5f;
+ public static final String REDUCE_PREEMPTION_LIMIT = MRConstants.YARN_MR_PREFIX
+ + "reduce.preemption.limit";
+
+ public static final String NODE_BLACKLISTING_ENABLE = MRConstants.YARN_MR_PREFIX
+ + "node.blacklisting.enable";
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java
new file mode 100644
index 0000000..9b7dc6c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java
@@ -0,0 +1,55 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+
+
+/**
+ * Context interface for sharing information across components in YARN App.
+ */
+@InterfaceAudience.Private
+public interface AppContext {
+
+ ApplicationId getApplicationID();
+
+ ApplicationAttemptId getApplicationAttemptId();
+
+ String getApplicationName();
+
+ long getStartTime();
+
+ CharSequence getUser();
+
+ Job getJob(JobId jobID);
+
+ Map<JobId, Job> getAllJobs();
+
+ EventHandler getEventHandler();
+
+ Clock getClock();
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
new file mode 100644
index 0000000..0da8933
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -0,0 +1,576 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.LocalContainerLauncher;
+import org.apache.hadoop.mapred.TaskAttemptListenerImpl;
+import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.mapreduce.v2.app.client.MRClientService;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl;
+import org.apache.hadoop.mapreduce.v2.app.local.LocalContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
+import org.apache.hadoop.mapreduce.v2.app.recover.Recovery;
+import org.apache.hadoop.mapreduce.v2.app.recover.RecoveryService;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator;
+import org.apache.hadoop.mapreduce.v2.app.speculate.Speculator;
+import org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleaner;
+import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanerImpl;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.SystemClock;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.service.CompositeService;
+import org.apache.hadoop.yarn.service.Service;
+
+/**
+ * The Map-Reduce Application Master.
+ * The state machine is encapsulated in the implementation of the Job
+ * interface. All state changes happen via the Job interface. Each event
+ * results in a Finite State Transition in Job.
+ *
+ * MR AppMaster is a composition of loosely coupled services. The services
+ * interact with each other via events. The components resemble the
+ * Actors model: each component acts on the events it receives and sends
+ * events out to other components.
+ * This keeps it highly concurrent, with no or minimal synchronization needs.
+ *
+ * The events are dispatched by a central dispatch mechanism. All components
+ * register with the Dispatcher.
+ *
+ * Information is shared across different components using AppContext.
+ */
+
+public class MRAppMaster extends CompositeService {
+
+ private static final Log LOG = LogFactory.getLog(MRAppMaster.class);
+
+ private Clock clock;
+ private final long startTime = System.currentTimeMillis();
+ private String appName;
+ private final int startCount;
+ private final ApplicationId appID;
+ private final ApplicationAttemptId appAttemptID;
+ protected final MRAppMetrics metrics;
+ private Set<TaskId> completedTasksFromPreviousRun;
+ private AppContext context;
+ private Dispatcher dispatcher;
+ private ClientService clientService;
+ private ContainerAllocator containerAllocator;
+ private ContainerLauncher containerLauncher;
+ private TaskCleaner taskCleaner;
+ private Speculator speculator;
+ private TaskAttemptListener taskAttemptListener;
+ private JobTokenSecretManager jobTokenSecretManager =
+ new JobTokenSecretManager();
+
+ private Job job;
+
+ public MRAppMaster(ApplicationId applicationId, int startCount) {
+ this(applicationId, new SystemClock(), startCount);
+ }
+
+ public MRAppMaster(ApplicationId applicationId, Clock clock, int startCount) {
+ super(MRAppMaster.class.getName());
+ this.clock = clock;
+ this.appID = applicationId;
+ this.appAttemptID = RecordFactoryProvider.getRecordFactory(null)
+ .newRecordInstance(ApplicationAttemptId.class);
+ this.appAttemptID.setApplicationId(appID);
+ this.appAttemptID.setAttemptId(startCount);
+ this.startCount = startCount;
+ this.metrics = MRAppMetrics.create();
+ LOG.info("Created MRAppMaster for application " + applicationId);
+ }
+
+ @Override
+ public void init(final Configuration conf) {
+ context = new RunningAppContext();
+
+    // Job name is the same as the app name until we support a DAG of jobs
+    // for an app later
+ appName = conf.get(MRJobConfig.JOB_NAME, "<missing app name>");
+
+ if (conf.getBoolean(AMConstants.RECOVERY_ENABLE, false)
+ && startCount > 1) {
+ LOG.info("Recovery is enabled. Will try to recover from previous life.");
+ Recovery recoveryServ = new RecoveryService(appID, clock, startCount);
+ addIfService(recoveryServ);
+ dispatcher = recoveryServ.getDispatcher();
+ clock = recoveryServ.getClock();
+ completedTasksFromPreviousRun = recoveryServ.getCompletedTasks();
+ } else {
+ dispatcher = new AsyncDispatcher();
+ addIfService(dispatcher);
+ }
+
+ //service to handle requests to TaskUmbilicalProtocol
+ taskAttemptListener = createTaskAttemptListener(context);
+ addIfService(taskAttemptListener);
+
+ //service to do the task cleanup
+ taskCleaner = createTaskCleaner(context);
+ addIfService(taskCleaner);
+
+ //service to handle requests from JobClient
+ clientService = createClientService(context);
+ addIfService(clientService);
+
+ //service to log job history events
+ EventHandler<JobHistoryEvent> historyService =
+ createJobHistoryHandler(context);
+ addIfService(historyService);
+
+ JobEventDispatcher synchronousJobEventDispatcher = new JobEventDispatcher();
+
+ //register the event dispatchers
+ dispatcher.register(JobEventType.class, synchronousJobEventDispatcher);
+ dispatcher.register(TaskEventType.class, new TaskEventDispatcher());
+ dispatcher.register(TaskAttemptEventType.class,
+ new TaskAttemptEventDispatcher());
+ dispatcher.register(TaskCleaner.EventType.class, taskCleaner);
+ dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
+ historyService);
+
+ if (conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false)
+ || conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false)) {
+ //optional service to speculate on task attempts' progress
+ speculator = createSpeculator(conf, context);
+ addIfService(speculator);
+ }
+
+ dispatcher.register(Speculator.EventType.class,
+ new SpeculatorEventDispatcher());
+
+ Credentials fsTokens = new Credentials();
+ if (UserGroupInformation.isSecurityEnabled()) {
+ // Read the file-system tokens from the localized tokens-file.
+ try {
+ Path jobSubmitDir =
+ FileContext.getLocalFSFileContext().makeQualified(
+ new Path(new File(MRConstants.JOB_SUBMIT_DIR)
+ .getAbsolutePath()));
+ Path jobTokenFile =
+ new Path(jobSubmitDir, MRConstants.APPLICATION_TOKENS_FILE);
+ fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
+ LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile="
+ + jobTokenFile);
+
+ UserGroupInformation currentUser =
+ UserGroupInformation.getCurrentUser();
+ for (Token<? extends TokenIdentifier> tk : fsTokens.getAllTokens()) {
+        LOG.info(" --- DEBUG: Token of kind " + tk.getKind()
+            + " in current ugi in the AppMaster for service "
+            + tk.getService());
+ currentUser.addToken(tk); // For use by AppMaster itself.
+ }
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+
+ super.init(conf);
+
+ //---- start of what used to be startJobs() code:
+
+ Configuration config = getConfig();
+
+ job = createJob(config, fsTokens);
+
+    /** create a job event for job initialization */
+ JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
+ /** send init to the job (this does NOT trigger job execution) */
+ synchronousJobEventDispatcher.handle(initJobEvent);
+
+    // send init to speculator. This won't yet start it, as the dispatcher
+    // isn't started yet.
+ dispatcher.getEventHandler().handle(
+ new SpeculatorEvent(job.getID(), clock.getTime()));
+
+ // JobImpl's InitTransition is done (call above is synchronous), so the
+ // "uber-decision" (MR-1220) has been made. Query job and switch to
+ // ubermode if appropriate (by registering different container-allocator
+ // and container-launcher services/event-handlers).
+
+ if (job.isUber()) {
+ LOG.info("MRAppMaster uberizing job " + job.getID()
+ + " in local container (\"uber-AM\").");
+ } else {
+ LOG.info("MRAppMaster launching normal, non-uberized, multi-container "
+ + "job " + job.getID() + ".");
+ }
+
+ // service to allocate containers from RM (if non-uber) or to fake it (uber)
+ containerAllocator =
+ createContainerAllocator(clientService, context, job.isUber());
+ addIfService(containerAllocator);
+ dispatcher.register(ContainerAllocator.EventType.class, containerAllocator);
+ if (containerAllocator instanceof Service) {
+ ((Service) containerAllocator).init(config);
+ }
+
+ // corresponding service to launch allocated containers via NodeManager
+ containerLauncher = createContainerLauncher(context, job.isUber());
+ addIfService(containerLauncher);
+ dispatcher.register(ContainerLauncher.EventType.class, containerLauncher);
+ if (containerLauncher instanceof Service) {
+ ((Service) containerLauncher).init(config);
+ }
+
+ } // end of init()
+
+ /** Create and initialize (but don't start) a single job.
+ * @param fsTokens */
+ protected Job createJob(Configuration conf, Credentials fsTokens) {
+
+ // create single job
+ Job newJob = new JobImpl(appID, conf, dispatcher.getEventHandler(),
+ taskAttemptListener, jobTokenSecretManager, fsTokens, clock, startCount,
+ completedTasksFromPreviousRun, metrics);
+ ((RunningAppContext) context).jobs.put(newJob.getID(), newJob);
+
+ dispatcher.register(JobFinishEvent.Type.class,
+ new EventHandler<JobFinishEvent>() {
+ @Override
+ public void handle(JobFinishEvent event) {
+ // job has finished
+ // this is the only job, so shut down the Appmaster
+ // note in a workflow scenario, this may lead to creation of a new
+ // job (FIXME?)
+
+            // TODO: currently just wait for some time so clients can learn
+            // the final states. Will be removed once the RM comes up.
+ try {
+ Thread.sleep(5000);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ LOG.info("Calling stop for all the services");
+ try {
+ stop();
+ } catch (Throwable t) {
+ LOG.warn("Graceful stop failed ", t);
+ }
+ //TODO: this is required because rpc server does not shut down
+ // in spite of calling server.stop().
+ //Bring the process down by force.
+ //Not needed after HADOOP-7140
+ LOG.info("Exiting MR AppMaster..GoodBye!");
+ System.exit(0);
+ }
+ });
+
+ return newJob;
+ } // end createJob()
+
+ protected void addIfService(Object object) {
+ if (object instanceof Service) {
+ addService((Service) object);
+ }
+ }
+
+ protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
+ AppContext context) {
+ JobHistoryEventHandler eventHandler = new JobHistoryEventHandler(context,
+ getStartCount());
+ return eventHandler;
+ }
+
+ protected Speculator createSpeculator(Configuration conf, AppContext context) {
+ Class<? extends Speculator> speculatorClass;
+
+ try {
+ speculatorClass
+          // "yarn.mapreduce.speculator.class"
+ = conf.getClass(AMConstants.SPECULATOR_CLASS,
+ DefaultSpeculator.class,
+ Speculator.class);
+ Constructor<? extends Speculator> speculatorConstructor
+ = speculatorClass.getConstructor
+ (Configuration.class, AppContext.class);
+ Speculator result = speculatorConstructor.newInstance(conf, context);
+
+ return result;
+ } catch (InstantiationException ex) {
+ LOG.error("Can't make a speculator -- check "
+ + AMConstants.SPECULATOR_CLASS + " " + ex);
+ throw new YarnException(ex);
+ } catch (IllegalAccessException ex) {
+ LOG.error("Can't make a speculator -- check "
+ + AMConstants.SPECULATOR_CLASS + " " + ex);
+ throw new YarnException(ex);
+ } catch (InvocationTargetException ex) {
+ LOG.error("Can't make a speculator -- check "
+ + AMConstants.SPECULATOR_CLASS + " " + ex);
+ throw new YarnException(ex);
+ } catch (NoSuchMethodException ex) {
+ LOG.error("Can't make a speculator -- check "
+ + AMConstants.SPECULATOR_CLASS + " " + ex);
+ throw new YarnException(ex);
+ }
+ }
+
+ protected TaskAttemptListener createTaskAttemptListener(AppContext context) {
+ TaskAttemptListener lis =
+ new TaskAttemptListenerImpl(context, jobTokenSecretManager);
+ return lis;
+ }
+
+ protected TaskCleaner createTaskCleaner(AppContext context) {
+ return new TaskCleanerImpl(context);
+ }
+
+ protected ContainerAllocator createContainerAllocator(
+ ClientService clientService, AppContext context, boolean isLocal) {
+ //return new StaticContainerAllocator(context);
+ return isLocal
+ ? new LocalContainerAllocator(clientService, context)
+ : new RMContainerAllocator(clientService, context);
+ }
+
+ protected ContainerLauncher createContainerLauncher(AppContext context,
+ boolean isLocal) {
+ return isLocal
+ ? new LocalContainerLauncher(context,
+ (TaskUmbilicalProtocol) taskAttemptListener)
+ : new ContainerLauncherImpl(context);
+ }
+
+ //TODO:should have an interface for MRClientService
+ protected ClientService createClientService(AppContext context) {
+ return new MRClientService(context);
+ }
+
+ public ApplicationId getAppID() {
+ return appID;
+ }
+
+ public int getStartCount() {
+ return startCount;
+ }
+
+ public AppContext getContext() {
+ return context;
+ }
+
+ public Dispatcher getDispatcher() {
+ return dispatcher;
+ }
+
+ public Set<TaskId> getCompletedTaskFromPreviousRun() {
+ return completedTasksFromPreviousRun;
+ }
+
+ public ContainerAllocator getContainerAllocator() {
+ return containerAllocator;
+ }
+
+ public ContainerLauncher getContainerLauncher() {
+ return containerLauncher;
+ }
+
+ public TaskAttemptListener getTaskAttemptListener() {
+ return taskAttemptListener;
+ }
+
+ class RunningAppContext implements AppContext {
+
+ private Map<JobId, Job> jobs = new ConcurrentHashMap<JobId, Job>();
+
+ @Override
+ public ApplicationAttemptId getApplicationAttemptId() {
+ return appAttemptID;
+ }
+
+ @Override
+ public ApplicationId getApplicationID() {
+ return appID;
+ }
+
+ @Override
+ public String getApplicationName() {
+ return appName;
+ }
+
+ @Override
+ public long getStartTime() {
+ return startTime;
+ }
+
+ @Override
+ public Job getJob(JobId jobID) {
+ return jobs.get(jobID);
+ }
+
+ @Override
+ public Map<JobId, Job> getAllJobs() {
+ return jobs;
+ }
+
+ @Override
+ public EventHandler getEventHandler() {
+ return dispatcher.getEventHandler();
+ }
+
+ @Override
+ public CharSequence getUser() {
+ return getConfig().get(MRJobConfig.USER_NAME);
+ }
+
+ @Override
+ public Clock getClock() {
+ return clock;
+ }
+ }
+
+ @Override
+ public void start() {
+    // metrics system init is really init & start.
+    // It's more test-friendly to put it here.
+ DefaultMetricsSystem.initialize("MRAppMaster");
+
+ startJobs();
+ //start all the components
+ super.start();
+ }
+
+ /**
+ * This can be overridden to instantiate multiple jobs and create a
+ * workflow.
+ *
+ * TODO: Rework the design to actually support this. Currently much of the
+ * job stuff has been moved to init() above to support uberization (MR-1220).
+ * In a typical workflow, one presumably would want to uberize only a subset
+ * of the jobs (the "small" ones), which is awkward with the current design.
+ */
+ protected void startJobs() {
+ /** create a job-start event to get this ball rolling */
+ JobEvent startJobEvent = new JobEvent(job.getID(), JobEventType.JOB_START);
+ /** send the job-start event. this triggers the job execution. */
+ dispatcher.getEventHandler().handle(startJobEvent);
+ }
+
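+  // The dispatchers below route events from the central Dispatcher to the
+  // state machines of the target Job, Task or TaskAttempt.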
+ private class JobEventDispatcher implements EventHandler<JobEvent> {
+ @Override
+ public void handle(JobEvent event) {
+ ((EventHandler<JobEvent>)context.getJob(event.getJobId())).handle(event);
+ }
+ }
+
+ private class TaskEventDispatcher implements EventHandler<TaskEvent> {
+ @Override
+ public void handle(TaskEvent event) {
+ Task task = context.getJob(event.getTaskID().getJobId()).getTask(
+ event.getTaskID());
+ ((EventHandler<TaskEvent>)task).handle(event);
+ }
+ }
+
+ private class TaskAttemptEventDispatcher
+ implements EventHandler<TaskAttemptEvent> {
+ @Override
+ public void handle(TaskAttemptEvent event) {
+ Job job = context.getJob(event.getTaskAttemptID().getTaskId().getJobId());
+ Task task = job.getTask(event.getTaskAttemptID().getTaskId());
+ TaskAttempt attempt = task.getAttempt(event.getTaskAttemptID());
+ ((EventHandler<TaskAttemptEvent>) attempt).handle(event);
+ }
+ }
+
+ private class SpeculatorEventDispatcher implements
+ EventHandler<SpeculatorEvent> {
+ @Override
+ public void handle(SpeculatorEvent event) {
+ if (getConfig().getBoolean(MRJobConfig.MAP_SPECULATIVE, false)
+ || getConfig().getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false)) {
+        // Speculator IS enabled, direct the event to it.
+ speculator.handle(event);
+ }
+ }
+ }
+
+ public static void main(String[] args) {
+ try {
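+      // Expected args (by position): cluster timestamp, application id,
+      // and the AM start/fail count, parsed below.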
+ //Configuration.addDefaultResource("job.xml");
+ ApplicationId applicationId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
+
+ applicationId.setClusterTimestamp(Long.valueOf(args[0]));
+ applicationId.setId(Integer.valueOf(args[1]));
+ int failCount = Integer.valueOf(args[2]);
+ MRAppMaster appMaster = new MRAppMaster(applicationId, failCount);
+ YarnConfiguration conf = new YarnConfiguration(new JobConf());
+ conf.addResource(new Path(MRConstants.JOB_CONF_FILE));
+ conf.set(MRJobConfig.USER_NAME,
+ System.getProperty("user.name"));
+ UserGroupInformation.setConfiguration(conf);
+ appMaster.init(conf);
+ appMaster.start();
+ } catch (Throwable t) {
+ LOG.error("Caught throwable. Exiting:", t);
+ System.exit(1);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
new file mode 100644
index 0000000..10cb4e2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
@@ -0,0 +1,58 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.yarn.proto.MRClientProtocol;
+import org.apache.hadoop.yarn.security.ApplicationTokenSelector;
+
+public class MRClientSecurityInfo extends SecurityInfo {
+
+ @Override
+ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+ return null;
+ }
+
+ @Override
+ public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+ if (!protocol.equals(MRClientProtocol.MRClientProtocolService.BlockingInterface.class)) {
+ return null;
+ }
+ return new TokenInfo() {
+
+ @Override
+ public Class<? extends Annotation> annotationType() {
+ return null;
+ }
+
+ @Override
+ public Class<? extends TokenSelector<? extends TokenIdentifier>>
+ value() {
+ return ApplicationTokenSelector.class;
+ }
+ };
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java
new file mode 100644
index 0000000..9df88d6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java
@@ -0,0 +1,35 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.mapred.Task;
+import org.apache.hadoop.mapred.WrappedJvmID;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
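+// Used by the AM to track the binding between launched JVMs and the task
+// attempts they run; implementations serve the task umbilical endpoint
+// returned by getAddress(), which task attempts connect back to.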
+public interface TaskAttemptListener {
+
+ InetSocketAddress getAddress();
+
+ void register(TaskAttemptId attemptID, Task task, WrappedJvmID jvmID);
+
+ void unregister(TaskAttemptId attemptID, WrappedJvmID jvmID);
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
new file mode 100644
index 0000000..2218c88
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
@@ -0,0 +1,137 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+
+/**
+ * This class keeps track of tasks that have already been launched. It
+ * determines if a task is alive and running or marks a task as dead if it does
+ * not hear from it for a long time.
+ *
+ */
+public class TaskHeartbeatHandler extends AbstractService {
+
+ private static final Log LOG = LogFactory.getLog(TaskHeartbeatHandler.class);
+
+  //thread which runs periodically to check the last time a heartbeat was
+  //received from each registered task.
+ private Thread lostTaskCheckerThread;
+ private volatile boolean stopped;
+ private int taskTimeOut = 5*60*1000;//5 mins
+
+ private final EventHandler eventHandler;
+ private final Clock clock;
+
+ private Map<TaskAttemptId, Long> runningAttempts
+ = new HashMap<TaskAttemptId, Long>();
+
+ public TaskHeartbeatHandler(EventHandler eventHandler, Clock clock) {
+ super("TaskHeartbeatHandler");
+ this.eventHandler = eventHandler;
+ this.clock = clock;
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ super.init(conf);
+ taskTimeOut = conf.getInt("mapreduce.task.timeout", 5*60*1000);
+ }
+
+ @Override
+ public void start() {
+ lostTaskCheckerThread = new Thread(new PingChecker());
+ lostTaskCheckerThread.start();
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ stopped = true;
+ lostTaskCheckerThread.interrupt();
+ super.stop();
+ }
+
+ public synchronized void receivedPing(TaskAttemptId attemptID) {
+    //only update the timestamp for attempts that are still registered
+ if (runningAttempts.containsKey(attemptID)) {
+ runningAttempts.put(attemptID, clock.getTime());
+ }
+ }
+
+ public synchronized void register(TaskAttemptId attemptID) {
+ runningAttempts.put(attemptID, clock.getTime());
+ }
+
+ public synchronized void unregister(TaskAttemptId attemptID) {
+ runningAttempts.remove(attemptID);
+ }
+
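+  // Typical flow: an attempt is register()ed at launch, receivedPing() is
+  // called on each heartbeat, and unregister() is called when the attempt
+  // finishes; PingChecker below times out attempts that stop pinging.
+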
+ private class PingChecker implements Runnable {
+
+ @Override
+ public void run() {
+ while (!stopped && !Thread.currentThread().isInterrupted()) {
+ synchronized (TaskHeartbeatHandler.this) {
+ Iterator<Map.Entry<TaskAttemptId, Long>> iterator =
+ runningAttempts.entrySet().iterator();
+
+          //avoid calculating the current time every time in the loop
+ long currentTime = clock.getTime();
+
+ while (iterator.hasNext()) {
+ Map.Entry<TaskAttemptId, Long> entry = iterator.next();
+ if (currentTime > entry.getValue() + taskTimeOut) {
+ //task is lost, remove from the list and raise lost event
+ iterator.remove();
+ eventHandler.handle(
+ new TaskAttemptDiagnosticsUpdateEvent(entry.getKey(),
+ "AttemptID:" + entry.getKey().toString() +
+ " Timed out after " + taskTimeOut/1000 + " secs"));
+ eventHandler.handle(new TaskAttemptEvent(entry
+ .getKey(), TaskAttemptEventType.TA_TIMED_OUT));
+ }
+ }
+ }
+ try {
+ Thread.sleep(taskTimeOut);
+ } catch (InterruptedException e) {
+ LOG.info("TaskHeartbeatHandler thread interrupted");
+ break;
+ }
+ }
+ }
+
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java
new file mode 100644
index 0000000..a4c0a0d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.client;
+
+import java.net.InetSocketAddress;
+
+public interface ClientService {
+
+ InetSocketAddress getBindAddress();
+
+ int getHttpPort();
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
new file mode 100644
index 0000000..504a941
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
@@ -0,0 +1,392 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.client;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.security.AccessControlException;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
+import org.apache.hadoop.yarn.security.SchedulerSecurityInfo;
+import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.WebApps;
+
+/**
+ * This module is responsible for talking to the
+ * jobclient (user facing).
+ *
+ */
+public class MRClientService extends AbstractService
+ implements ClientService {
+
+ static final Log LOG = LogFactory.getLog(MRClientService.class);
+
+ private MRClientProtocol protocolHandler;
+ private Server server;
+ private WebApp webApp;
+ private InetSocketAddress bindAddress;
+ private AppContext appContext;
+
+ public MRClientService(AppContext appContext) {
+ super("MRClientService");
+ this.appContext = appContext;
+ this.protocolHandler = new MRClientProtocolHandler();
+ }
+
+ public void start() {
+    Configuration conf = new Configuration(getConfig()); // Copied so the sec-info class config isn't clobbered
+ YarnRPC rpc = YarnRPC.create(conf);
+ InetSocketAddress address = NetUtils.createSocketAddr("0.0.0.0:0");
+ InetAddress hostNameResolved = null;
+ try {
+ hostNameResolved = InetAddress.getLocalHost();
+ } catch (UnknownHostException e) {
+ throw new YarnException(e);
+ }
+
+ ClientToAMSecretManager secretManager = null;
+ if (UserGroupInformation.isSecurityEnabled()) {
+ secretManager = new ClientToAMSecretManager();
+ String secretKeyStr =
+ System
+ .getenv(ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME);
+ byte[] bytes = Base64.decodeBase64(secretKeyStr);
+ ApplicationTokenIdentifier identifier =
+ new ApplicationTokenIdentifier(this.appContext.getApplicationID());
+ secretManager.setMasterKey(identifier, bytes);
+ conf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ SchedulerSecurityInfo.class, SecurityInfo.class); // Same for now.
+ }
+ server =
+ rpc.getServer(MRClientProtocol.class, protocolHandler, address,
+ conf, secretManager,
+ conf.getInt(AMConstants.AM_JOB_CLIENT_THREADS,
+ AMConstants.DEFAULT_AM_JOB_CLIENT_THREADS));
+ server.start();
+ this.bindAddress =
+ NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
+ + ":" + server.getPort());
+ LOG.info("Instantiated MRClientService at " + this.bindAddress);
+ try {
+ webApp = WebApps.$for("yarn", AppContext.class, appContext).with(conf).
+ start(new AMWebApp());
+ } catch (Exception e) {
+ LOG.error("Webapps failed to start. Ignoring for now:", e);
+ }
+ super.start();
+ }
+
+ public void stop() {
+ server.close();
+ if (webApp != null) {
+ webApp.stop();
+ }
+ super.stop();
+ }
+
+ @Override
+ public InetSocketAddress getBindAddress() {
+ return bindAddress;
+ }
+
+ @Override
+ public int getHttpPort() {
+ return webApp.port();
+ }
+
+ class MRClientProtocolHandler implements MRClientProtocol {
+
+ private RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
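+    // The verifyAndGet* helpers resolve Job/Task/TaskAttempt records from the
+    // AppContext, translating unknown IDs into remote exceptions.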
+ private Job verifyAndGetJob(JobId jobID,
+ boolean modifyAccess) throws YarnRemoteException {
+ Job job = appContext.getJob(jobID);
+ if (job == null) {
+ throw RPCUtil.getRemoteException("Unknown job " + jobID);
+ }
+ //TODO fix job acls.
+ //JobACL operation = JobACL.VIEW_JOB;
+ //if (modifyAccess) {
+ // operation = JobACL.MODIFY_JOB;
+ //}
+      //Access check is disabled for now.
+ //checkAccess(job, operation);
+ return job;
+ }
+
+ private Task verifyAndGetTask(TaskId taskID,
+ boolean modifyAccess) throws YarnRemoteException {
+ Task task = verifyAndGetJob(taskID.getJobId(),
+ modifyAccess).getTask(taskID);
+ if (task == null) {
+ throw RPCUtil.getRemoteException("Unknown Task " + taskID);
+ }
+ return task;
+ }
+
+ private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID,
+ boolean modifyAccess) throws YarnRemoteException {
+ TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(),
+ modifyAccess).getAttempt(attemptID);
+ if (attempt == null) {
+ throw RPCUtil.getRemoteException("Unknown TaskAttempt " + attemptID);
+ }
+ return attempt;
+ }
+
+ private void checkAccess(Job job, JobACL jobOperation)
+ throws YarnRemoteException {
+ if (!UserGroupInformation.isSecurityEnabled()) {
+ return;
+ }
+ UserGroupInformation callerUGI;
+ try {
+ callerUGI = UserGroupInformation.getCurrentUser();
+ } catch (IOException e) {
+ throw RPCUtil.getRemoteException(e);
+ }
+ if(!job.checkAccess(callerUGI, jobOperation)) {
+ throw RPCUtil.getRemoteException(new AccessControlException("User "
+ + callerUGI.getShortUserName() + " cannot perform operation "
+ + jobOperation.name() + " on " + job.getID()));
+ }
+ }
+
+ @Override
+ public GetCountersResponse getCounters(GetCountersRequest request)
+ throws YarnRemoteException {
+ JobId jobId = request.getJobId();
+ Job job = verifyAndGetJob(jobId, false);
+ GetCountersResponse response =
+ recordFactory.newRecordInstance(GetCountersResponse.class);
+ response.setCounters(job.getCounters());
+ return response;
+ }
+
+ @Override
+ public GetJobReportResponse getJobReport(GetJobReportRequest request)
+ throws YarnRemoteException {
+ JobId jobId = request.getJobId();
+ Job job = verifyAndGetJob(jobId, false);
+ GetJobReportResponse response =
+ recordFactory.newRecordInstance(GetJobReportResponse.class);
+ response.setJobReport(job.getReport());
+ return response;
+ }
+
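+    // Illustrative client-side sketch (not part of this class; "proxy"
+    // stands for a hypothetical MRClientProtocol RPC proxy):
+    //
+    //   GetJobReportRequest req =
+    //       recordFactory.newRecordInstance(GetJobReportRequest.class);
+    //   req.setJobId(jobId);
+    //   JobReport report = proxy.getJobReport(req).getJobReport();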
+
+ @Override
+ public GetTaskAttemptReportResponse getTaskAttemptReport(
+ GetTaskAttemptReportRequest request) throws YarnRemoteException {
+ TaskAttemptId taskAttemptId = request.getTaskAttemptId();
+ GetTaskAttemptReportResponse response =
+ recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class);
+ response.setTaskAttemptReport(
+ verifyAndGetAttempt(taskAttemptId, false).getReport());
+ return response;
+ }
+
+ @Override
+ public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
+ throws YarnRemoteException {
+ TaskId taskId = request.getTaskId();
+ GetTaskReportResponse response =
+ recordFactory.newRecordInstance(GetTaskReportResponse.class);
+ response.setTaskReport(verifyAndGetTask(taskId, false).getReport());
+ return response;
+ }
+
+ @Override
+ public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(
+ GetTaskAttemptCompletionEventsRequest request)
+ throws YarnRemoteException {
+ JobId jobId = request.getJobId();
+ int fromEventId = request.getFromEventId();
+ int maxEvents = request.getMaxEvents();
+ Job job = verifyAndGetJob(jobId, false);
+
+ GetTaskAttemptCompletionEventsResponse response =
+ recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class);
+ response.addAllCompletionEvents(Arrays.asList(
+ job.getTaskAttemptCompletionEvents(fromEventId, maxEvents)));
+ return response;
+ }
+
+ @Override
+ public KillJobResponse killJob(KillJobRequest request)
+ throws YarnRemoteException {
+ JobId jobId = request.getJobId();
+    String message = "Kill job received from client " + jobId;
+ LOG.info(message);
+ verifyAndGetJob(jobId, true);
+ appContext.getEventHandler().handle(
+ new JobDiagnosticsUpdateEvent(jobId, message));
+ appContext.getEventHandler().handle(
+ new JobEvent(jobId, JobEventType.JOB_KILL));
+ KillJobResponse response =
+ recordFactory.newRecordInstance(KillJobResponse.class);
+ return response;
+ }
+
+ @Override
+ public KillTaskResponse killTask(KillTaskRequest request)
+ throws YarnRemoteException {
+ TaskId taskId = request.getTaskId();
+ String message = "Kill task received from client " + taskId;
+ LOG.info(message);
+ verifyAndGetTask(taskId, true);
+ appContext.getEventHandler().handle(
+ new TaskEvent(taskId, TaskEventType.T_KILL));
+ KillTaskResponse response =
+ recordFactory.newRecordInstance(KillTaskResponse.class);
+ return response;
+ }
+
+ @Override
+ public KillTaskAttemptResponse killTaskAttempt(
+ KillTaskAttemptRequest request) throws YarnRemoteException {
+ TaskAttemptId taskAttemptId = request.getTaskAttemptId();
+ String message = "Kill task attempt received from client " + taskAttemptId;
+ LOG.info(message);
+ verifyAndGetAttempt(taskAttemptId, true);
+ appContext.getEventHandler().handle(
+ new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
+ appContext.getEventHandler().handle(
+ new TaskAttemptEvent(taskAttemptId,
+ TaskAttemptEventType.TA_KILL));
+ KillTaskAttemptResponse response =
+ recordFactory.newRecordInstance(KillTaskAttemptResponse.class);
+ return response;
+ }
+
+ @Override
+ public GetDiagnosticsResponse getDiagnostics(
+ GetDiagnosticsRequest request) throws YarnRemoteException {
+ TaskAttemptId taskAttemptId = request.getTaskAttemptId();
+
+ GetDiagnosticsResponse response =
+ recordFactory.newRecordInstance(GetDiagnosticsResponse.class);
+ response.addAllDiagnostics(
+ verifyAndGetAttempt(taskAttemptId, false).getDiagnostics());
+ return response;
+ }
+
+ @Override
+ public FailTaskAttemptResponse failTaskAttempt(
+ FailTaskAttemptRequest request) throws YarnRemoteException {
+ TaskAttemptId taskAttemptId = request.getTaskAttemptId();
+ String message = "Fail task attempt received from client " + taskAttemptId;
+ LOG.info(message);
+ verifyAndGetAttempt(taskAttemptId, true);
+ appContext.getEventHandler().handle(
+ new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
+ appContext.getEventHandler().handle(
+ new TaskAttemptEvent(taskAttemptId,
+ TaskAttemptEventType.TA_FAILMSG));
+ FailTaskAttemptResponse response = recordFactory.
+ newRecordInstance(FailTaskAttemptResponse.class);
+ return response;
+ }
+
+ @Override
+ public GetTaskReportsResponse getTaskReports(
+ GetTaskReportsRequest request) throws YarnRemoteException {
+ JobId jobId = request.getJobId();
+ TaskType taskType = request.getTaskType();
+
+ GetTaskReportsResponse response =
+ recordFactory.newRecordInstance(GetTaskReportsResponse.class);
+
+ Job job = verifyAndGetJob(jobId, false);
+ LOG.info("Getting task report for " + taskType + " " + jobId);
+ Collection<Task> tasks = job.getTasks(taskType).values();
+ LOG.info("Getting task report size " + tasks.size());
+ for (Task task : tasks) {
+ response.addTaskReport(task.getReport());
+ }
+ return response;
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
new file mode 100644
index 0000000..1be08ab
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
@@ -0,0 +1,59 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.security.UserGroupInformation;
+
+
+/**
+ * Main interface to interact with the job. Provides only getters.
+ */
+public interface Job {
+
+ JobId getID();
+ String getName();
+ JobState getState();
+ JobReport getReport();
+ Counters getCounters();
+ Map<TaskId,Task> getTasks();
+ Map<TaskId,Task> getTasks(TaskType taskType);
+ Task getTask(TaskId taskID);
+ List<String> getDiagnostics();
+ int getTotalMaps();
+ int getTotalReduces();
+ int getCompletedMaps();
+ int getCompletedReduces();
+ boolean isUber();
+
+ TaskAttemptCompletionEvent[]
+ getTaskAttemptCompletionEvents(int fromEventId, int maxEvents);
+
+ boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Task.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Task.java
new file mode 100644
index 0000000..84eb834
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Task.java
@@ -0,0 +1,58 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job;
+
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+
+/**
+ * Read only view of Task.
+ */
+public interface Task {
+ TaskId getID();
+ TaskReport getReport();
+ TaskState getState();
+ Counters getCounters();
+ float getProgress();
+ TaskType getType();
+ Map<TaskAttemptId, TaskAttempt> getAttempts();
+ TaskAttempt getAttempt(TaskAttemptId attemptID);
+
+  /** Whether the task has reached a final state.
+   */
+ boolean isFinished();
+
+  /**
+   * Whether the output of the given task attempt can be committed. Note that
+   * once the task grants one attempt a go-ahead to commit, subsequent
+   * canCommit requests from any other attempt must return false.
+   *
+   * @param taskAttemptID the attempt asking to commit
+   * @return whether the attempt's output can be committed.
+   */
+ boolean canCommit(TaskAttemptId taskAttemptID);
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
new file mode 100644
index 0000000..bae1136
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
@@ -0,0 +1,66 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job;
+
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+
+/**
+ * Read only view of TaskAttempt.
+ */
+public interface TaskAttempt {
+ TaskAttemptId getID();
+ TaskAttemptReport getReport();
+ List<String> getDiagnostics();
+ Counters getCounters();
+ float getProgress();
+ TaskAttemptState getState();
+
+  /** Whether the attempt has reached a final state.
+   */
+ boolean isFinished();
+
+  /** If a container has been assigned, returns its ID; otherwise null.
+   */
+  ContainerId getAssignedContainerID();
+
+  /** If a container has been assigned, returns the container manager's
+   * address; otherwise null.
+   */
+  String getAssignedContainerMgrAddress();
+
+  /** If a container has been assigned, returns the node's http address;
+   * otherwise null.
+   */
+  String getNodeHttpAddress();
+
+  /** Returns the time at which the container was launched; 0 if the
+   * container has not been launched yet.
+   */
+ long getLaunchTime();
+
+  /** Returns the attempt's finish time; 0 if the attempt has not
+   * finished yet.
+   */
+ long getFinishTime();
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobCounterUpdateEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobCounterUpdateEvent.java
new file mode 100644
index 0000000..de8e648
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobCounterUpdateEvent.java
@@ -0,0 +1,42 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
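+/**
+ * Event carrying a batch of incremental counter updates for a job. Producers
+ * add (counter, delta) pairs and the job applies them when handling
+ * JOB_COUNTER_UPDATE. Illustrative sketch (JobCounter stands in for any
+ * counter enum, eventHandler for the app's event handler):
+ *
+ *   JobCounterUpdateEvent e = new JobCounterUpdateEvent(jobId);
+ *   e.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
+ *   eventHandler.handle(e);
+ */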
+public class JobCounterUpdateEvent extends JobEvent {
+
+ List<CounterIncrementalUpdate> counterUpdates = null;
+
+ public JobCounterUpdateEvent(JobId jobId) {
+ super(jobId, JobEventType.JOB_COUNTER_UPDATE);
+ counterUpdates = new ArrayList<JobCounterUpdateEvent.CounterIncrementalUpdate>();
+ }
+
+ public void addCounterUpdate(Enum<?> key, long incrValue) {
+ counterUpdates.add(new CounterIncrementalUpdate(key, incrValue));
+ }
+
+ public List<CounterIncrementalUpdate> getCounterUpdates() {
+ return counterUpdates;
+ }
+
+ public static class CounterIncrementalUpdate {
+ Enum<?> key;
+ long incrValue;
+
+ public CounterIncrementalUpdate(Enum<?> key, long incrValue) {
+ this.key = key;
+ this.incrValue = incrValue;
+ }
+
+ public Enum<?> getCounterKey() {
+ return key;
+ }
+
+ public long getIncrementValue() {
+ return incrValue;
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobDiagnosticsUpdateEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobDiagnosticsUpdateEvent.java
new file mode 100644
index 0000000..7ea03f6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobDiagnosticsUpdateEvent.java
@@ -0,0 +1,36 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+
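+/**
+ * Event carrying a diagnostic message to be appended to a job's diagnostics.
+ */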
+public class JobDiagnosticsUpdateEvent extends JobEvent {
+
+ private String diagnosticUpdate;
+
+ public JobDiagnosticsUpdateEvent(JobId jobID, String diagnostic) {
+ super(jobID, JobEventType.JOB_DIAGNOSTIC_UPDATE);
+ this.diagnosticUpdate = diagnostic;
+ }
+
+ public String getDiagnosticUpdate() {
+ return this.diagnosticUpdate;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEvent.java
new file mode 100644
index 0000000..4ffda6a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEvent.java
@@ -0,0 +1,41 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+/**
+ * This class encapsulates job-related events.
+ */
+public class JobEvent extends AbstractEvent<JobEventType> {
+
+ private JobId jobID;
+
+ public JobEvent(JobId jobID, JobEventType type) {
+ super(type);
+ this.jobID = jobID;
+ }
+
+ public JobId getJobId() {
+ return jobID;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java
new file mode 100644
index 0000000..e0223b1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java
@@ -0,0 +1,48 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+/**
+ * Event types handled by Job.
+ */
+public enum JobEventType {
+
+ //Producer:Client
+ JOB_KILL,
+
+ //Producer:MRAppMaster
+ JOB_INIT,
+ JOB_START,
+
+ //Producer:Task
+ JOB_TASK_COMPLETED,
+ JOB_MAP_TASK_RESCHEDULED,
+ JOB_TASK_ATTEMPT_COMPLETED,
+
+ //Producer:Job
+ JOB_COMPLETED,
+
+ //Producer:Any component
+ JOB_DIAGNOSTIC_UPDATE,
+ INTERNAL_ERROR,
+ JOB_COUNTER_UPDATE,
+
+ //Producer:TaskAttemptListener
+ JOB_TASK_ATTEMPT_FETCH_FAILURE
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobFinishEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobFinishEvent.java
new file mode 100644
index 0000000..a1a458f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobFinishEvent.java
@@ -0,0 +1,42 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
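+/**
+ * Event signalling that a job has reached a terminal state.
+ */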
+public class JobFinishEvent
+ extends AbstractEvent<JobFinishEvent.Type> {
+
+ public enum Type {
+ STATE_CHANGED
+ }
+
+ private JobId jobID;
+
+ public JobFinishEvent(JobId jobID) {
+ super(Type.STATE_CHANGED);
+ this.jobID = jobID;
+ }
+
+ public JobId getJobId() {
+ return jobID;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobMapTaskRescheduledEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobMapTaskRescheduledEvent.java
new file mode 100644
index 0000000..10b8c07
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobMapTaskRescheduledEvent.java
@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+
+
+
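+/**
+ * Event informing a job that one of its completed map tasks is being
+ * rescheduled (for example, because its output was lost).
+ */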
+public class JobMapTaskRescheduledEvent extends JobEvent {
+
+ private TaskId taskID;
+
+ public JobMapTaskRescheduledEvent(TaskId taskID) {
+ super(taskID.getJobId(), JobEventType.JOB_MAP_TASK_RESCHEDULED);
+ this.taskID = taskID;
+ }
+
+ public TaskId getTaskID() {
+ return taskID;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptCompletedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptCompletedEvent.java
new file mode 100644
index 0000000..94479b2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptCompletedEvent.java
@@ -0,0 +1,37 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+
+
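+/**
+ * Event wrapping a TaskAttemptCompletionEvent for delivery to the job.
+ */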
+public class JobTaskAttemptCompletedEvent extends JobEvent {
+
+ private TaskAttemptCompletionEvent completionEvent;
+
+ public JobTaskAttemptCompletedEvent(TaskAttemptCompletionEvent completionEvent) {
+ super(completionEvent.getAttemptId().getTaskId().getJobId(),
+ JobEventType.JOB_TASK_ATTEMPT_COMPLETED);
+ this.completionEvent = completionEvent;
+ }
+
+ public TaskAttemptCompletionEvent getCompletionEvent() {
+ return completionEvent;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptFetchFailureEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptFetchFailureEvent.java
new file mode 100644
index 0000000..37e2034
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptFetchFailureEvent.java
@@ -0,0 +1,48 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+
+
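+/**
+ * Event reporting that a reduce attempt could not fetch map output from
+ * one or more map attempts.
+ */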
+public class JobTaskAttemptFetchFailureEvent extends JobEvent {
+
+ private final TaskAttemptId reduce;
+ private final List<TaskAttemptId> maps;
+
+ public JobTaskAttemptFetchFailureEvent(TaskAttemptId reduce,
+ List<TaskAttemptId> maps) {
+ super(reduce.getTaskId().getJobId(),
+ JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE);
+ this.reduce = reduce;
+ this.maps = maps;
+ }
+
+ public List<TaskAttemptId> getMaps() {
+ return maps;
+ }
+
+ public TaskAttemptId getReduce() {
+ return reduce;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskEvent.java
new file mode 100644
index 0000000..b74f9c2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskEvent.java
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+
+
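+/**
+ * Event informing the job that a task has completed, along with the task's
+ * final state.
+ */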
+public class JobTaskEvent extends JobEvent {
+
+ private TaskId taskID;
+ private TaskState taskState;
+
+ public JobTaskEvent(TaskId taskID, TaskState taskState) {
+ super(taskID.getJobId(), JobEventType.JOB_TASK_COMPLETED);
+ this.taskID = taskID;
+ this.taskState = taskState;
+ }
+
+ public TaskId getTaskID() {
+ return taskID;
+ }
+
+ public TaskState getState() {
+ return taskState;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerAssignedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerAssignedEvent.java
new file mode 100644
index 0000000..dc12192
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerAssignedEvent.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+
+
+
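+/**
+ * Event informing a task attempt that a container has been assigned to it.
+ */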
+public class TaskAttemptContainerAssignedEvent extends TaskAttemptEvent {
+
+ private final Container container;
+
+ public TaskAttemptContainerAssignedEvent(TaskAttemptId id,
+ Container container) {
+ super(id, TaskAttemptEventType.TA_ASSIGNED);
+ this.container = container;
+ }
+
+ public Container getContainer() {
+ return this.container;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptDiagnosticsUpdateEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptDiagnosticsUpdateEvent.java
new file mode 100644
index 0000000..9b30785
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptDiagnosticsUpdateEvent.java
@@ -0,0 +1,37 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+
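+/**
+ * Event carrying a diagnostic message for a task attempt.
+ */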
+public class TaskAttemptDiagnosticsUpdateEvent extends TaskAttemptEvent {
+
+ private String diagnosticInfo;
+
+ public TaskAttemptDiagnosticsUpdateEvent(TaskAttemptId attemptID,
+ String diagnosticInfo) {
+ super(attemptID, TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE);
+ this.diagnosticInfo = diagnosticInfo;
+ }
+
+ public String getDiagnosticInfo() {
+ return diagnosticInfo;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEvent.java
new file mode 100644
index 0000000..41b6b3e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEvent.java
@@ -0,0 +1,41 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+/**
+ * This class encapsulates events related to task attempts.
+ */
+public class TaskAttemptEvent extends AbstractEvent<TaskAttemptEventType> {
+
+ private TaskAttemptId attemptID;
+
+ public TaskAttemptEvent(TaskAttemptId id, TaskAttemptEventType type) {
+ super(type);
+ this.attemptID = id;
+ }
+
+ public TaskAttemptId getTaskAttemptID() {
+ return attemptID;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java
new file mode 100644
index 0000000..a6c6840
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java
@@ -0,0 +1,55 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+/**
+ * Event types handled by TaskAttempt.
+ */
+public enum TaskAttemptEventType {
+
+ //Producer:Task
+ TA_SCHEDULE,
+ TA_RESCHEDULE,
+
+ //Producer:Client, Task
+ TA_KILL,
+
+ //Producer:ContainerAllocator
+ TA_ASSIGNED,
+ TA_CONTAINER_COMPLETED,
+
+ //Producer:ContainerLauncher
+ TA_CONTAINER_LAUNCHED,
+ TA_CONTAINER_LAUNCH_FAILED,
+ TA_CONTAINER_CLEANED,
+
+ //Producer:TaskAttemptListener
+ TA_DIAGNOSTICS_UPDATE,
+ TA_COMMIT_PENDING,
+ TA_DONE,
+ TA_FAILMSG,
+ TA_UPDATE,
+ TA_TIMED_OUT,
+
+ //Producer:TaskCleaner
+ TA_CLEANUP_DONE,
+
+ //Producer:Job
+ TA_TOO_MANY_FETCH_FAILURE,
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java
new file mode 100644
index 0000000..c364535
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java
@@ -0,0 +1,61 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+
+
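+/**
+ * Event carrying a task attempt's reported status update.
+ */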
+public class TaskAttemptStatusUpdateEvent extends TaskAttemptEvent {
+
+ private TaskAttemptStatus reportedTaskAttemptStatus;
+
+ public TaskAttemptStatusUpdateEvent(TaskAttemptId id,
+ TaskAttemptStatus taskAttemptStatus) {
+ super(id, TaskAttemptEventType.TA_UPDATE);
+ this.reportedTaskAttemptStatus = taskAttemptStatus;
+ }
+
+ public TaskAttemptStatus getReportedTaskAttemptStatus() {
+ return reportedTaskAttemptStatus;
+ }
+
+  /**
+   * The internal TaskAttemptStatus object corresponding to the status
+   * reported by the remote task.
+   */
+ public static class TaskAttemptStatus {
+ public TaskAttemptId id;
+ public float progress;
+ public Counters counters;
+ public String diagnosticInfo;
+ public String stateString;
+ public Phase phase;
+ public long outputSize;
+ public List<TaskAttemptId> fetchFailedMaps;
+ public long mapFinishTime;
+ public long shuffleFinishTime;
+ public long sortFinishTime;
+ public TaskAttemptState taskState;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEvent.java
new file mode 100644
index 0000000..5c8e97b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEvent.java
@@ -0,0 +1,40 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+
+/**
+ * This class encapsulates task-related events.
+ */
+public class TaskEvent extends AbstractEvent<TaskEventType> {
+
+ private TaskId taskID;
+
+ public TaskEvent(TaskId taskID, TaskEventType type) {
+ super(type);
+ this.taskID = taskID;
+ }
+
+ public TaskId getTaskID() {
+ return taskID;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEventType.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEventType.java
new file mode 100644
index 0000000..d385e2f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEventType.java
@@ -0,0 +1,41 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+/**
+ * Event types handled by Task.
+ */
+public enum TaskEventType {
+
+ //Producer:Client, Job
+ T_KILL,
+
+ //Producer:Job
+ T_SCHEDULE,
+
+ //Producer:Speculator
+ T_ADD_SPEC_ATTEMPT,
+
+ //Producer:TaskAttempt
+ T_ATTEMPT_LAUNCHED,
+ T_ATTEMPT_COMMIT_PENDING,
+ T_ATTEMPT_FAILED,
+ T_ATTEMPT_SUCCEEDED,
+ T_ATTEMPT_KILLED
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptEvent.java
new file mode 100644
index 0000000..d9bd3d5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptEvent.java
@@ -0,0 +1,37 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+
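+/**
+ * Task event that also carries the id of the task attempt which caused it.
+ */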
+public class TaskTAttemptEvent extends TaskEvent {
+
+ private TaskAttemptId attemptID;
+
+ public TaskTAttemptEvent(TaskAttemptId id, TaskEventType type) {
+ super(id.getTaskId(), type);
+ this.attemptID = id;
+ }
+
+ public TaskAttemptId getTaskAttemptID() {
+ return attemptID;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
new file mode 100644
index 0000000..0416d3d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -0,0 +1,1416 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.FileOutputCommitter;
+import org.apache.hadoop.mapred.JobACLsManager;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobFinishedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobInfoChangeEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobInitedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
+import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
+import org.apache.hadoop.mapreduce.lib.chain.ChainReducer;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
+import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
+import org.apache.hadoop.mapreduce.task.JobContextImpl;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+
+/** Implementation of the Job interface. Maintains the job's state machine.
+ * Read and write calls use a ReadWriteLock for concurrency.
+ */
+public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
+ EventHandler<JobEvent> {
+
+ private static final Log LOG = LogFactory.getLog(JobImpl.class);
+
+ //The maximum fraction of fetch failures allowed for a map
+ private static final double MAX_ALLOWED_FETCH_FAILURES_FRACTION = 0.5;
+
+  // Maximum no. of fetch-failure notifications after which a map task is failed
+ private static final int MAX_FETCH_FAILURES_NOTIFICATIONS = 3;
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ //final fields
+ private final Clock clock;
+ private final JobACLsManager aclsManager;
+ private final String username;
+ private final Map<JobACL, AccessControlList> jobACLs;
+ private final int startCount;
+ private final Set<TaskId> completedTasksFromPreviousRun;
+ private final Lock readLock;
+ private final Lock writeLock;
+ private final JobId jobId;
+ private final String jobName;
+ private final org.apache.hadoop.mapreduce.JobID oldJobId;
+ private final TaskAttemptListener taskAttemptListener;
+ private final Object tasksSyncHandle = new Object();
+ private final Set<TaskId> mapTasks = new LinkedHashSet<TaskId>();
+ private final Set<TaskId> reduceTasks = new LinkedHashSet<TaskId>();
+ private final EventHandler eventHandler;
+ private final MRAppMetrics metrics;
+
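+  // The tasks map is shared with readers and copied lazily before the next
+  // write once lazyTasksCopyNeeded is set (copy-on-write), guarded by
+  // tasksSyncHandle.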
+ private boolean lazyTasksCopyNeeded = false;
+ private volatile Map<TaskId, Task> tasks = new LinkedHashMap<TaskId, Task>();
+ private Counters jobCounters = newCounters();
+  // FIXME:
+  //
+  // Can then replace task-level uber counters (MR-2424) with job-level ones
+  // sent from LocalContainerLauncher, and eventually including a count of
+  // uber-AM attempts (probably sent from MRAppMaster).
+ public Configuration conf;
+
+ //fields initialized in init
+ private FileSystem fs;
+ private Path remoteJobSubmitDir;
+ public Path remoteJobConfFile;
+ private JobContext jobContext;
+ private OutputCommitter committer;
+ private int allowedMapFailuresPercent = 0;
+ private int allowedReduceFailuresPercent = 0;
+ private List<TaskAttemptCompletionEvent> taskAttemptCompletionEvents;
+ private final List<String> diagnostics = new ArrayList<String>();
+
+ //task/attempt related datastructures
+ private final Map<TaskId, Integer> successAttemptCompletionEventNoMap =
+ new HashMap<TaskId, Integer>();
+ private final Map<TaskAttemptId, Integer> fetchFailuresMapping =
+ new HashMap<TaskAttemptId, Integer>();
+
+ private static final DiagnosticsUpdateTransition
+ DIAGNOSTIC_UPDATE_TRANSITION = new DiagnosticsUpdateTransition();
+ private static final InternalErrorTransition
+ INTERNAL_ERROR_TRANSITION = new InternalErrorTransition();
+ private static final TaskAttemptCompletedEventTransition
+ TASK_ATTEMPT_COMPLETED_EVENT_TRANSITION =
+ new TaskAttemptCompletedEventTransition();
+ private static final CounterUpdateTransition COUNTER_UPDATE_TRANSITION =
+ new CounterUpdateTransition();
+
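+  // The transition table below is static and shared by all JobImpl
+  // instances; each job instantiates its own StateMachine from it.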
+ protected static final
+ StateMachineFactory<JobImpl, JobState, JobEventType, JobEvent>
+ stateMachineFactory
+ = new StateMachineFactory<JobImpl, JobState, JobEventType, JobEvent>
+ (JobState.NEW)
+
+ // Transitions from NEW state
+ .addTransition(JobState.NEW, JobState.NEW,
+ JobEventType.JOB_DIAGNOSTIC_UPDATE,
+ DIAGNOSTIC_UPDATE_TRANSITION)
+ .addTransition(JobState.NEW, JobState.NEW,
+ JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
+ .addTransition
+ (JobState.NEW,
+ EnumSet.of(JobState.INITED, JobState.FAILED),
+ JobEventType.JOB_INIT,
+ new InitTransition())
+ .addTransition(JobState.NEW, JobState.KILLED,
+ JobEventType.JOB_KILL,
+ new KillNewJobTransition())
+ .addTransition(JobState.NEW, JobState.ERROR,
+ JobEventType.INTERNAL_ERROR,
+ INTERNAL_ERROR_TRANSITION)
+
+ // Transitions from INITED state
+ .addTransition(JobState.INITED, JobState.INITED,
+ JobEventType.JOB_DIAGNOSTIC_UPDATE,
+ DIAGNOSTIC_UPDATE_TRANSITION)
+ .addTransition(JobState.INITED, JobState.INITED,
+ JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
+ .addTransition(JobState.INITED, JobState.RUNNING,
+ JobEventType.JOB_START,
+ new StartTransition())
+ .addTransition(JobState.INITED, JobState.KILLED,
+ JobEventType.JOB_KILL,
+ new KillInitedJobTransition())
+ .addTransition(JobState.INITED, JobState.ERROR,
+ JobEventType.INTERNAL_ERROR,
+ INTERNAL_ERROR_TRANSITION)
+
+ // Transitions from RUNNING state
+ .addTransition(JobState.RUNNING, JobState.RUNNING,
+ JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
+ TASK_ATTEMPT_COMPLETED_EVENT_TRANSITION)
+ .addTransition
+ (JobState.RUNNING,
+ EnumSet.of(JobState.RUNNING, JobState.SUCCEEDED, JobState.FAILED),
+ JobEventType.JOB_TASK_COMPLETED,
+ new TaskCompletedTransition())
+ .addTransition
+ (JobState.RUNNING,
+ EnumSet.of(JobState.RUNNING, JobState.SUCCEEDED, JobState.FAILED),
+ JobEventType.JOB_COMPLETED,
+ new JobNoTasksCompletedTransition())
+ .addTransition(JobState.RUNNING, JobState.KILL_WAIT,
+ JobEventType.JOB_KILL, new KillTasksTransition())
+ .addTransition(JobState.RUNNING, JobState.RUNNING,
+ JobEventType.JOB_MAP_TASK_RESCHEDULED,
+ new MapTaskRescheduledTransition())
+ .addTransition(JobState.RUNNING, JobState.RUNNING,
+ JobEventType.JOB_DIAGNOSTIC_UPDATE,
+ DIAGNOSTIC_UPDATE_TRANSITION)
+ .addTransition(JobState.RUNNING, JobState.RUNNING,
+ JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
+ .addTransition(JobState.RUNNING, JobState.RUNNING,
+ JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
+ new TaskAttemptFetchFailureTransition())
+ .addTransition(
+ JobState.RUNNING,
+ JobState.ERROR, JobEventType.INTERNAL_ERROR,
+ INTERNAL_ERROR_TRANSITION)
+
+ // Transitions from KILL_WAIT state.
+ .addTransition
+ (JobState.KILL_WAIT,
+ EnumSet.of(JobState.KILL_WAIT, JobState.KILLED),
+ JobEventType.JOB_TASK_COMPLETED,
+ new KillWaitTaskCompletedTransition())
+ .addTransition(JobState.KILL_WAIT, JobState.KILL_WAIT,
+ JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
+ TASK_ATTEMPT_COMPLETED_EVENT_TRANSITION)
+ .addTransition(JobState.KILL_WAIT, JobState.KILL_WAIT,
+ JobEventType.JOB_DIAGNOSTIC_UPDATE,
+ DIAGNOSTIC_UPDATE_TRANSITION)
+ .addTransition(JobState.KILL_WAIT, JobState.KILL_WAIT,
+ JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
+ .addTransition(
+ JobState.KILL_WAIT,
+ JobState.ERROR, JobEventType.INTERNAL_ERROR,
+ INTERNAL_ERROR_TRANSITION)
+ // Ignore-able events
+ .addTransition(JobState.KILL_WAIT, JobState.KILL_WAIT,
+ EnumSet.of(JobEventType.JOB_KILL,
+ JobEventType.JOB_MAP_TASK_RESCHEDULED,
+ JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
+
+ // Transitions from SUCCEEDED state
+ .addTransition(JobState.SUCCEEDED, JobState.SUCCEEDED,
+ JobEventType.JOB_DIAGNOSTIC_UPDATE,
+ DIAGNOSTIC_UPDATE_TRANSITION)
+ .addTransition(JobState.SUCCEEDED, JobState.SUCCEEDED,
+ JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
+ .addTransition(
+ JobState.SUCCEEDED,
+ JobState.ERROR, JobEventType.INTERNAL_ERROR,
+ INTERNAL_ERROR_TRANSITION)
+ // Ignore-able events
+ .addTransition(JobState.SUCCEEDED, JobState.SUCCEEDED,
+ EnumSet.of(JobEventType.JOB_KILL,
+ JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
+
+ // Transitions from FAILED state
+ .addTransition(JobState.FAILED, JobState.FAILED,
+ JobEventType.JOB_DIAGNOSTIC_UPDATE,
+ DIAGNOSTIC_UPDATE_TRANSITION)
+ .addTransition(JobState.FAILED, JobState.FAILED,
+ JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
+ .addTransition(
+ JobState.FAILED,
+ JobState.ERROR, JobEventType.INTERNAL_ERROR,
+ INTERNAL_ERROR_TRANSITION)
+ // Ignore-able events
+ .addTransition(JobState.FAILED, JobState.FAILED,
+ EnumSet.of(JobEventType.JOB_KILL,
+ JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
+
+ // Transitions from KILLED state
+ .addTransition(JobState.KILLED, JobState.KILLED,
+ JobEventType.JOB_DIAGNOSTIC_UPDATE,
+ DIAGNOSTIC_UPDATE_TRANSITION)
+ .addTransition(JobState.KILLED, JobState.KILLED,
+ JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
+ .addTransition(
+ JobState.KILLED,
+ JobState.ERROR, JobEventType.INTERNAL_ERROR,
+ INTERNAL_ERROR_TRANSITION)
+ // Ignore-able events
+ .addTransition(JobState.KILLED, JobState.KILLED,
+ EnumSet.of(JobEventType.JOB_KILL,
+ JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
+
+ // No transitions from INTERNAL_ERROR state. Ignore all.
+ .addTransition(
+ JobState.ERROR,
+ JobState.ERROR,
+ EnumSet.of(JobEventType.JOB_INIT,
+ JobEventType.JOB_KILL,
+ JobEventType.JOB_TASK_COMPLETED,
+ JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
+ JobEventType.JOB_MAP_TASK_RESCHEDULED,
+ JobEventType.JOB_DIAGNOSTIC_UPDATE,
+ JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
+ JobEventType.INTERNAL_ERROR))
+
+ // create the topology tables
+ .installTopology();
+
+ private final StateMachine<JobState, JobEventType, JobEvent> stateMachine;
+
+ //changing fields while the job is running
+ private int numMapTasks;
+ private int numReduceTasks;
+ private int completedTaskCount = 0;
+ private int succeededMapTaskCount = 0;
+ private int succeededReduceTaskCount = 0;
+ private int failedMapTaskCount = 0;
+ private int failedReduceTaskCount = 0;
+ private int killedMapTaskCount = 0;
+ private int killedReduceTaskCount = 0;
+ private long submitTime;
+ private long startTime;
+ private long finishTime;
+ private float setupProgress;
+ private float cleanupProgress;
+ private boolean isUber = false;
+
+ private Credentials fsTokens;
+ private Token<JobTokenIdentifier> jobToken;
+ private JobTokenSecretManager jobTokenSecretManager;
+
+ public JobImpl(ApplicationId appID, Configuration conf,
+ EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
+ JobTokenSecretManager jobTokenSecretManager,
+ Credentials fsTokenCredentials, Clock clock, int startCount,
+ Set<TaskId> completedTasksFromPreviousRun, MRAppMetrics metrics) {
+
+ this.jobId = recordFactory.newRecordInstance(JobId.class);
+ this.jobName = conf.get(JobContext.JOB_NAME, "<missing job name>");
+ this.conf = conf;
+ this.metrics = metrics;
+ this.clock = clock;
+ this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
+ this.startCount = startCount;
+ jobId.setAppId(appID);
+ jobId.setId(appID.getId());
+ oldJobId = TypeConverter.fromYarn(jobId);
+ LOG.info("Job created" +
+ " appId=" + appID +
+ " jobId=" + jobId +
+ " oldJobId=" + oldJobId);
+
+ this.taskAttemptListener = taskAttemptListener;
+ this.eventHandler = eventHandler;
+ ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+ this.readLock = readWriteLock.readLock();
+ this.writeLock = readWriteLock.writeLock();
+
+ this.fsTokens = fsTokenCredentials;
+ this.jobTokenSecretManager = jobTokenSecretManager;
+
+ this.aclsManager = new JobACLsManager(conf);
+ this.username = System.getProperty("user.name");
+ this.jobACLs = aclsManager.constructJobACLs(conf);
+ // This "this leak" is okay because the retained pointer is in an
+ // instance variable.
+ stateMachine = stateMachineFactory.make(this);
+ }
+
+ protected StateMachine<JobState, JobEventType, JobEvent> getStateMachine() {
+ return stateMachine;
+ }
+
+ @Override
+ public JobId getID() {
+ return jobId;
+ }
+
+ // Getter methods that make unit testing easier (package-scoped)
+ OutputCommitter getCommitter() {
+ return this.committer;
+ }
+
+ EventHandler getEventHandler() {
+ return this.eventHandler;
+ }
+
+ JobContext getJobContext() {
+ return this.jobContext;
+ }
+
+ @Override
+ public boolean checkAccess(UserGroupInformation callerUGI,
+ JobACL jobOperation) {
+ if (!UserGroupInformation.isSecurityEnabled()) {
+ return true;
+ }
+ AccessControlList jobACL = jobACLs.get(jobOperation);
+ return aclsManager.checkAccess(callerUGI, jobOperation, username, jobACL);
+ }
+
+ @Override
+ public Task getTask(TaskId taskID) {
+ readLock.lock();
+ try {
+ return tasks.get(taskID);
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public int getCompletedMaps() {
+ readLock.lock();
+ try {
+ return succeededMapTaskCount + failedMapTaskCount + killedMapTaskCount;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public int getCompletedReduces() {
+ readLock.lock();
+ try {
+ return succeededReduceTaskCount + failedReduceTaskCount
+ + killedReduceTaskCount;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public boolean isUber() {
+ return isUber;
+ }
+
+ @Override
+ public Counters getCounters() {
+ Counters counters = newCounters();
+ readLock.lock();
+ try {
+ incrAllCounters(counters, jobCounters);
+ return incrTaskCounters(counters, tasks.values());
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ private Counters getTypeCounters(Set<TaskId> taskIds) {
+ Counters counters = newCounters();
+ for (TaskId taskId : taskIds) {
+ Task task = tasks.get(taskId);
+ incrAllCounters(counters, task.getCounters());
+ }
+ return counters;
+ }
+
+ private Counters getMapCounters() {
+ readLock.lock();
+ try {
+ return getTypeCounters(mapTasks);
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ private Counters getReduceCounters() {
+ readLock.lock();
+ try {
+ return getTypeCounters(reduceTasks);
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ public static Counters newCounters() {
+ Counters counters = RecordFactoryProvider.getRecordFactory(null)
+ .newRecordInstance(Counters.class);
+ return counters;
+ }
+
+ public static Counters incrTaskCounters(Counters counters,
+ Collection<Task> tasks) {
+ for (Task task : tasks) {
+ incrAllCounters(counters, task.getCounters());
+ }
+ return counters;
+ }
+
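+  // Merges 'other' into 'counters' group by group, creating any missing
+  // groups and counters on the fly; display names from 'other' win.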
+ public static void incrAllCounters(Counters counters, Counters other) {
+ if (other != null) {
+ for (CounterGroup otherGroup: other.getAllCounterGroups().values()) {
+ CounterGroup group = counters.getCounterGroup(otherGroup.getName());
+ if (group == null) {
+ group = RecordFactoryProvider.getRecordFactory(null)
+ .newRecordInstance(CounterGroup.class);
+ group.setName(otherGroup.getName());
+ counters.setCounterGroup(group.getName(), group);
+ }
+ group.setDisplayName(otherGroup.getDisplayName());
+ for (Counter otherCounter : otherGroup.getAllCounters().values()) {
+ Counter counter = group.getCounter(otherCounter.getName());
+ if (counter == null) {
+ counter = RecordFactoryProvider.getRecordFactory(null)
+ .newRecordInstance(Counter.class);
+ counter.setName(otherCounter.getName());
+ group.setCounter(counter.getName(), counter);
+ }
+ counter.setDisplayName(otherCounter.getDisplayName());
+ counter.setValue(counter.getValue() + otherCounter.getValue());
+ }
+ }
+ }
+ }
+
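+  // Pages through completion events: an event's id equals its index in
+  // taskAttemptCompletionEvents (assigned in
+  // TaskAttemptCompletedEventTransition), so callers can poll with
+  // fromEventId += events.length.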
+ @Override
+ public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
+ int fromEventId, int maxEvents) {
+ TaskAttemptCompletionEvent[] events = new TaskAttemptCompletionEvent[0];
+ readLock.lock();
+ try {
+ if (taskAttemptCompletionEvents.size() > fromEventId) {
+ int actualMax = Math.min(maxEvents,
+ (taskAttemptCompletionEvents.size() - fromEventId));
+ events = taskAttemptCompletionEvents.subList(fromEventId,
+ actualMax + fromEventId).toArray(events);
+ }
+ return events;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public List<String> getDiagnostics() {
+ readLock.lock();
+ try {
+ return diagnostics;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public JobReport getReport() {
+ readLock.lock();
+ try {
+ JobReport report = recordFactory.newRecordInstance(JobReport.class);
+ report.setJobId(jobId);
+ report.setJobState(getState());
+
+      // TODO - Fix to correctly set up the report and to check state
+ if (report.getJobState() == JobState.NEW) {
+ return report;
+ }
+
+ report.setStartTime(startTime);
+ report.setFinishTime(finishTime);
+ report.setSetupProgress(setupProgress);
+ report.setCleanupProgress(cleanupProgress);
+ report.setMapProgress(computeProgress(mapTasks));
+ report.setReduceProgress(computeProgress(reduceTasks));
+
+ return report;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
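+  // Unweighted mean of per-task progress; tasks are not weighted by their
+  // input size.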
+ private float computeProgress(Set<TaskId> taskIds) {
+ readLock.lock();
+ try {
+ float progress = 0;
+ for (TaskId taskId : taskIds) {
+ Task task = tasks.get(taskId);
+ progress += task.getProgress();
+ }
+ int taskIdsSize = taskIds.size();
+ if (taskIdsSize != 0) {
+ progress = progress/taskIdsSize;
+ }
+ return progress;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
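+  // Copy-on-read handshake with addTask(): handing out the live map sets
+  // lazyTasksCopyNeeded, and the next addTask() then clones the map before
+  // mutating it, so callers iterating the returned view are never broken
+  // mid-iteration.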
+ @Override
+ public Map<TaskId, Task> getTasks() {
+ synchronized (tasksSyncHandle) {
+ lazyTasksCopyNeeded = true;
+ return Collections.unmodifiableMap(tasks);
+ }
+ }
+
+ @Override
+ public Map<TaskId,Task> getTasks(TaskType taskType) {
+ Map<TaskId, Task> localTasksCopy = tasks;
+ Map<TaskId, Task> result = new HashMap<TaskId, Task>();
+ Set<TaskId> tasksOfGivenType = null;
+ readLock.lock();
+ try {
+ if (TaskType.MAP == taskType) {
+ tasksOfGivenType = mapTasks;
+ } else {
+ tasksOfGivenType = reduceTasks;
+ }
+ for (TaskId taskID : tasksOfGivenType)
+ result.put(taskID, localTasksCopy.get(taskID));
+ return result;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public JobState getState() {
+ readLock.lock();
+ try {
+ return getStateMachine().getCurrentState();
+ } finally {
+ readLock.unlock();
+ }
+ }
+
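+  // Purely event-driven: fires T_SCHEDULE at each TaskImpl and returns;
+  // nothing blocks here (the events are presumably consumed on the AM's
+  // dispatcher thread).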
+ protected void scheduleTasks(Set<TaskId> taskIDs) {
+ for (TaskId taskID : taskIDs) {
+ eventHandler.handle(new TaskEvent(taskID,
+ TaskEventType.T_SCHEDULE));
+ }
+ }
+
+  /**
+   * The only entry point to change the Job.
+   */
+  @Override
+ public void handle(JobEvent event) {
+ LOG.info("Processing " + event.getJobId() + " of type " + event.getType());
+    writeLock.lock();
+    try {
+      JobState oldState = getState();
+      try {
+        getStateMachine().doTransition(event.getType(), event);
+      } catch (InvalidStateTransitonException e) {
+        LOG.error("Can't handle this event at current state", e);
+        addDiagnostic("Invalid event " + event.getType() +
+            " on Job " + this.jobId);
+        eventHandler.handle(new JobEvent(this.jobId,
+            JobEventType.INTERNAL_ERROR));
+      }
+      //notify the eventhandler of state change
+      if (oldState != getState()) {
+        LOG.info(jobId + " Job Transitioned from " + oldState + " to "
+            + getState());
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+ //helpful in testing
+ protected void addTask(Task task) {
+ synchronized (tasksSyncHandle) {
+ if (lazyTasksCopyNeeded) {
+ Map<TaskId, Task> newTasks = new LinkedHashMap<TaskId, Task>();
+ newTasks.putAll(tasks);
+ tasks = newTasks;
+ lazyTasksCopyNeeded = false;
+ }
+ }
+ tasks.put(task.getID(), task);
+ if (task.getType() == TaskType.MAP) {
+ mapTasks.add(task.getID());
+ } else if (task.getType() == TaskType.REDUCE) {
+ reduceTasks.add(task.getID());
+ }
+ metrics.waitingTask(task);
+ }
+
+ void setFinishTime() {
+ finishTime = clock.getTime();
+ }
+
+ void logJobHistoryFinishedEvent() {
+ this.setFinishTime();
+ JobFinishedEvent jfe = createJobFinishedEvent(this);
+ LOG.info("Calling handler for JobFinishedEvent ");
+ this.getEventHandler().handle(new JobHistoryEvent(this.jobId, jfe));
+ }
+
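+  // Returns null while tasks are still outstanding. Note that a failed
+  // commitJob() below is only logged, so the job is still reported
+  // SUCCEEDED in that case.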
+ static JobState checkJobCompleteSuccess(JobImpl job) {
+ // check for Job success
+ if (job.completedTaskCount == job.getTasks().size()) {
+ try {
+ // Commit job & do cleanup
+ job.getCommitter().commitJob(job.getJobContext());
+ } catch (IOException e) {
+ LOG.warn("Could not do commit for Job", e);
+ }
+
+ job.logJobHistoryFinishedEvent();
+ return job.finished(JobState.SUCCEEDED);
+ }
+ return null;
+ }
+
+ JobState finished(JobState finalState) {
+ if (getState() == JobState.RUNNING) {
+ metrics.endRunningJob(this);
+ }
+ if (finishTime == 0) setFinishTime();
+ eventHandler.handle(new JobFinishEvent(jobId));
+
+ switch (finalState) {
+ case KILLED:
+ metrics.killedJob(this);
+ break;
+ case FAILED:
+ metrics.failedJob(this);
+ break;
+ case SUCCEEDED:
+ metrics.completedJob(this);
+ }
+ return finalState;
+ }
+
+ @Override
+ public String getName() {
+ return jobName;
+ }
+
+ @Override
+ public int getTotalMaps() {
+ return mapTasks.size(); //FIXME: why indirection? return numMapTasks...
+ // unless race? how soon can this get called?
+ }
+
+ @Override
+ public int getTotalReduces() {
+ return reduceTasks.size(); //FIXME: why indirection? return numReduceTasks
+ }
+
+ public static class InitTransition
+ implements MultipleArcTransition<JobImpl, JobEvent, JobState> {
+
+ /**
+ * Note that this transition method is called directly (and synchronously)
+ * by MRAppMaster's init() method (i.e., no RPC, no thread-switching;
+ * just plain sequential call within AM context), so we can trigger
+ * modifications in AM state from here (at least, if AM is written that
+ * way; MR version is).
+ */
+ @Override
+ public JobState transition(JobImpl job, JobEvent event) {
+ job.submitTime = job.clock.getTime();
+ job.metrics.submittedJob(job);
+ job.metrics.preparingJob(job);
+ try {
+ setup(job);
+ job.fs = FileSystem.get(job.conf);
+
+ //log to job history
+ JobSubmittedEvent jse = new JobSubmittedEvent(job.oldJobId,
+ job.conf.get(MRJobConfig.JOB_NAME, "test"),
+ job.conf.get(MRJobConfig.USER_NAME, "mapred"),
+ job.submitTime,
+ job.remoteJobConfFile.toString(),
+ job.jobACLs, job.conf.get(MRJobConfig.QUEUE_NAME, "default"));
+ job.eventHandler.handle(new JobHistoryEvent(job.jobId, jse));
+ //TODO JH Verify jobACLs, UserName via UGI?
+
+ TaskSplitMetaInfo[] taskSplitMetaInfo = createSplits(job, job.jobId);
+ job.numMapTasks = taskSplitMetaInfo.length;
+ job.numReduceTasks = job.conf.getInt(MRJobConfig.NUM_REDUCES, 0);
+
+ if (job.numMapTasks == 0 && job.numReduceTasks == 0) {
+ job.addDiagnostic("No of maps and reduces are 0 " + job.jobId);
+ }
+
+ checkTaskLimits();
+
+
+ boolean newApiCommitter = false;
+ if ((job.numReduceTasks > 0 &&
+ job.conf.getBoolean("mapred.reducer.new-api", false)) ||
+ (job.numReduceTasks == 0 &&
+ job.conf.getBoolean("mapred.mapper.new-api", false))) {
+ newApiCommitter = true;
+ LOG.info("Using mapred newApiCommitter.");
+ }
+
+ LOG.info("OutputCommitter set in config " + job.conf.get("mapred.output.committer.class"));
+
+ if (newApiCommitter) {
+ job.jobContext = new JobContextImpl(job.conf,
+ job.oldJobId);
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = RecordFactoryProvider
+ .getRecordFactory(null)
+ .newRecordInstance(
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId.class);
+ attemptID.setTaskId(RecordFactoryProvider.getRecordFactory(null)
+ .newRecordInstance(TaskId.class));
+ attemptID.getTaskId().setJobId(job.jobId);
+ attemptID.getTaskId().setTaskType(TaskType.MAP);
+ TaskAttemptContext taskContext = new TaskAttemptContextImpl(job.conf,
+ TypeConverter.fromYarn(attemptID));
+ try {
+ OutputFormat outputFormat = ReflectionUtils.newInstance(
+ taskContext.getOutputFormatClass(), job.conf);
+ job.committer = outputFormat.getOutputCommitter(taskContext);
+ } catch(Exception e) {
+ throw new IOException("Failed to assign outputcommitter", e);
+ }
+ } else {
+ job.jobContext = new org.apache.hadoop.mapred.JobContextImpl(
+ new JobConf(job.conf), job.oldJobId);
+ job.committer = ReflectionUtils.newInstance(
+ job.conf.getClass("mapred.output.committer.class", FileOutputCommitter.class,
+ org.apache.hadoop.mapred.OutputCommitter.class), job.conf);
+ }
+ LOG.info("OutputCommitter is " + job.committer.getClass().getName());
+
+ long inputLength = 0;
+ for (int i = 0; i < job.numMapTasks; ++i) {
+ inputLength += taskSplitMetaInfo[i].getInputDataLength();
+ }
+
+      //FIXME: need new memory criterion for uber-decision (oops, too late
+      //here; until AM-resizing is supported, must depend on the job client
+      //to pass fat-slot needs)
+ // these are no longer "system" settings, necessarily; user may override
+ int sysMaxMaps = job.conf.getInt(MRJobConfig.JOB_UBERTASK_MAXMAPS, 9);
+ int sysMaxReduces =
+ job.conf.getInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 1);
+      long sysMaxBytes = job.conf.getLong(MRJobConfig.JOB_UBERTASK_MAXBYTES,
+          job.conf.getLong("dfs.block.size", 64*1024*1024));
+      //FIXME: this is wrong; get FS from [File?]InputFormat and default
+      //block size from that
+ //long sysMemSizeForUberSlot = JobTracker.getMemSizeForReduceSlot(); // FIXME [could use default AM-container memory size...]
+
+ boolean uberEnabled =
+ job.conf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ boolean smallNumMapTasks = (job.numMapTasks <= sysMaxMaps);
+ boolean smallNumReduceTasks = (job.numReduceTasks <= sysMaxReduces);
+ boolean smallInput = (inputLength <= sysMaxBytes);
+ boolean smallMemory = true; //FIXME (see above)
+ // ignoring overhead due to UberTask and statics as negligible here:
+// FIXME && (Math.max(memoryPerMap, memoryPerReduce) <= sysMemSizeForUberSlot
+// || sysMemSizeForUberSlot == JobConf.DISABLED_MEMORY_LIMIT)
+ boolean notChainJob = !isChainJob(job.conf);
+
+ // User has overall veto power over uberization, or user can modify
+ // limits (overriding system settings and potentially shooting
+ // themselves in the head). Note that ChainMapper/Reducer are
+ // fundamentally incompatible with MR-1220; they employ a blocking
+ // queue between the maps/reduces and thus require parallel execution,
+ // while "uber-AM" (MR AM + LocalContainerLauncher) loops over tasks
+ // and thus requires sequential execution.
+ job.isUber = uberEnabled && smallNumMapTasks && smallNumReduceTasks
+ && smallInput && smallMemory && notChainJob;
+
+ if (job.isUber) {
+ LOG.info("Uberizing job " + job.jobId + ": " + job.numMapTasks + "m+"
+ + job.numReduceTasks + "r tasks (" + inputLength
+ + " input bytes) will run sequentially on single node.");
+ //TODO: also note which node?
+
+        // make sure reduces are scheduled only after all maps are completed
+ job.conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,
+ 1.0f);
+ // uber-subtask attempts all get launched on same node; if one fails,
+ // probably should retry elsewhere, i.e., move entire uber-AM: ergo,
+ // limit attempts to 1 (or at most 2? probably not...)
+ job.conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
+ job.conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
+
+ // disable speculation: makes no sense to speculate an entire job
+// canSpeculateMaps = canSpeculateReduces = false; // [TODO: in old version, ultimately was from conf.getMapSpeculativeExecution(), conf.getReduceSpeculativeExecution()]
+ } else {
+ StringBuilder msg = new StringBuilder();
+ msg.append("Not uberizing ").append(job.jobId).append(" because:");
+ if (!uberEnabled)
+ msg.append(" not enabled;");
+ if (!smallNumMapTasks)
+ msg.append(" too many maps;");
+ if (!smallNumReduceTasks)
+ msg.append(" too many reduces;");
+ if (!smallInput)
+ msg.append(" too much input;");
+ if (!smallMemory)
+ msg.append(" too much RAM;");
+ if (!notChainJob)
+ msg.append(" chainjob");
+ LOG.info(msg.toString());
+ }
+
+ job.taskAttemptCompletionEvents =
+ new ArrayList<TaskAttemptCompletionEvent>(
+ job.numMapTasks + job.numReduceTasks + 10);
+
+ job.allowedMapFailuresPercent =
+ job.conf.getInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 0);
+ job.allowedReduceFailuresPercent =
+ job.conf.getInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 0);
+
+ // do the setup
+ job.committer.setupJob(job.jobContext);
+ job.setupProgress = 1.0f;
+
+ // create the Tasks but don't start them yet
+ createMapTasks(job, inputLength, taskSplitMetaInfo);
+ createReduceTasks(job);
+
+ job.metrics.endPreparingJob(job);
+ return JobState.INITED;
+        //TODO XXX Should JobInitedEvent be generated here (instead of in
+        //StartTransition)?
+
+ } catch (IOException e) {
+ LOG.warn("Job init failed", e);
+ job.addDiagnostic("Job init failed : "
+ + StringUtils.stringifyException(e));
+ job.abortJob(org.apache.hadoop.mapreduce.JobStatus.State.FAILED);
+ job.metrics.endPreparingJob(job);
+ return job.finished(JobState.FAILED);
+ }
+ }
+
+ protected void setup(JobImpl job) throws IOException {
+
+ String oldJobIDString = job.oldJobId.toString();
+ String user =
+ UserGroupInformation.getCurrentUser().getShortUserName();
+ Path path = MRApps.getStagingAreaDir(job.conf, user);
+ LOG.info("DEBUG --- startJobs:"
+ + " parent="
+ + path + " child="
+ + oldJobIDString);
+
+ job.remoteJobSubmitDir =
+ FileSystem.get(job.conf).makeQualified(
+ new Path(path, oldJobIDString));
+ job.remoteJobConfFile =
+ new Path(job.remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
+
+ // Prepare the TaskAttemptListener server for authentication of Containers
+ // TaskAttemptListener gets the information via jobTokenSecretManager.
+ JobTokenIdentifier identifier =
+ new JobTokenIdentifier(new Text(oldJobIDString));
+ job.jobToken =
+ new Token<JobTokenIdentifier>(identifier, job.jobTokenSecretManager);
+ job.jobToken.setService(identifier.getJobId());
+ // Add it to the jobTokenSecretManager so that TaskAttemptListener server
+ // can authenticate containers(tasks)
+ job.jobTokenSecretManager.addTokenForJob(oldJobIDString, job.jobToken);
+ LOG.info("Adding job token for " + oldJobIDString
+ + " to jobTokenSecretManager");
+
+ // Upload the jobTokens onto the remote FS so that ContainerManager can
+ // localize it to be used by the Containers(tasks)
+ Credentials tokenStorage = new Credentials();
+ TokenCache.setJobToken(job.jobToken, tokenStorage);
+
+ if (UserGroupInformation.isSecurityEnabled()) {
+ tokenStorage.addAll(job.fsTokens);
+ }
+
+ Path remoteJobTokenFile =
+ new Path(job.remoteJobSubmitDir,
+ MRConstants.APPLICATION_TOKENS_FILE);
+ tokenStorage.writeTokenStorageFile(remoteJobTokenFile, job.conf);
+ LOG.info("Writing back the job-token file on the remote file system:"
+ + remoteJobTokenFile.toString());
+ }
+
+ /**
+ * ChainMapper and ChainReducer must execute in parallel, so they're not
+ * compatible with uberization/LocalContainerLauncher (100% sequential).
+ */
+ boolean isChainJob(Configuration conf) {
+ boolean isChainJob = false;
+ try {
+ String mapClassName = conf.get(MRJobConfig.MAP_CLASS_ATTR);
+ if (mapClassName != null) {
+ Class<?> mapClass = Class.forName(mapClassName);
+ if (ChainMapper.class.isAssignableFrom(mapClass))
+ isChainJob = true;
+ }
+ } catch (ClassNotFoundException cnfe) {
+ // don't care; assume it's not derived from ChainMapper
+ }
+ try {
+ String reduceClassName = conf.get(MRJobConfig.REDUCE_CLASS_ATTR);
+ if (reduceClassName != null) {
+ Class<?> reduceClass = Class.forName(reduceClassName);
+ if (ChainReducer.class.isAssignableFrom(reduceClass))
+ isChainJob = true;
+ }
+ } catch (ClassNotFoundException cnfe) {
+ // don't care; assume it's not derived from ChainReducer
+ }
+ return isChainJob;
+ }
+
+ private void createMapTasks(JobImpl job, long inputLength,
+ TaskSplitMetaInfo[] splits) {
+ for (int i=0; i < job.numMapTasks; ++i) {
+ TaskImpl task =
+ new MapTaskImpl(job.jobId, i,
+ job.eventHandler,
+ job.remoteJobConfFile,
+ job.conf, splits[i],
+ job.taskAttemptListener,
+ job.committer, job.jobToken, job.fsTokens.getAllTokens(),
+ job.clock, job.completedTasksFromPreviousRun, job.startCount,
+ job.metrics);
+ job.addTask(task);
+ }
+ LOG.info("Input size for job " + job.jobId + " = " + inputLength
+ + ". Number of splits = " + splits.length);
+ }
+
+ private void createReduceTasks(JobImpl job) {
+ for (int i = 0; i < job.numReduceTasks; i++) {
+ TaskImpl task =
+ new ReduceTaskImpl(job.jobId, i,
+ job.eventHandler,
+ job.remoteJobConfFile,
+ job.conf, job.numMapTasks,
+ job.taskAttemptListener, job.committer, job.jobToken,
+ job.fsTokens.getAllTokens(), job.clock,
+ job.completedTasksFromPreviousRun, job.startCount, job.metrics);
+ job.addTask(task);
+ }
+ LOG.info("Number of reduces for job " + job.jobId + " = "
+ + job.numReduceTasks);
+ }
+
+ protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
+ TaskSplitMetaInfo[] allTaskSplitMetaInfo;
+ try {
+ allTaskSplitMetaInfo = SplitMetaInfoReader.readSplitMetaInfo(
+ job.oldJobId, job.fs,
+ job.conf,
+ job.remoteJobSubmitDir);
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ return allTaskSplitMetaInfo;
+ }
+
+ /**
+     * If the number of tasks is greater than the configured value,
+     * throw an exception that fails job initialization.
+ */
+ private void checkTaskLimits() {
+ // no code, for now
+ }
+ } // end of InitTransition
+
+ public static class StartTransition
+ implements SingleArcTransition<JobImpl, JobEvent> {
+ /**
+ * This transition executes in the event-dispatcher thread, though it's
+ * triggered in MRAppMaster's startJobs() method.
+ */
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+ job.startTime = job.clock.getTime();
+ job.scheduleTasks(job.mapTasks); // schedule (i.e., start) the maps
+ job.scheduleTasks(job.reduceTasks);
+ JobInitedEvent jie =
+ new JobInitedEvent(job.oldJobId,
+ job.startTime,
+ job.numMapTasks, job.numReduceTasks,
+ job.getState().toString()); //Will transition to state running. Currently in INITED
+ job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
+ JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
+ job.submitTime, job.startTime);
+ job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
+ job.metrics.runningJob(job);
+
+ // If we have no tasks, just transition to job completed
+ if (job.numReduceTasks == 0 && job.numMapTasks == 0) {
+ job.eventHandler.handle(new JobEvent(job.jobId, JobEventType.JOB_COMPLETED));
+ }
+ }
+ }
+
+ private void abortJob(
+ org.apache.hadoop.mapreduce.JobStatus.State finalState) {
+ try {
+ committer.abortJob(jobContext, finalState);
+ } catch (IOException e) {
+ LOG.warn("Could not abortJob", e);
+ }
+ if (finishTime == 0) setFinishTime();
+ cleanupProgress = 1.0f;
+ JobUnsuccessfulCompletionEvent unsuccessfulJobEvent =
+ new JobUnsuccessfulCompletionEvent(oldJobId,
+ finishTime,
+ succeededMapTaskCount,
+ succeededReduceTaskCount,
+ finalState.toString());
+ eventHandler.handle(new JobHistoryEvent(jobId, unsuccessfulJobEvent));
+ }
+
+ // JobFinishedEvent triggers the move of the history file out of the staging
+ // area. May need to create a new event type for this if JobFinished should
+ // not be generated for KilledJobs, etc.
+ private static JobFinishedEvent createJobFinishedEvent(JobImpl job) {
+ JobFinishedEvent jfe = new JobFinishedEvent(
+ job.oldJobId, job.finishTime,
+ job.succeededMapTaskCount, job.succeededReduceTaskCount,
+ job.failedMapTaskCount, job.failedReduceTaskCount,
+ TypeConverter.fromYarn(job.getMapCounters()),
+ TypeConverter.fromYarn(job.getReduceCounters()),
+ TypeConverter.fromYarn(job.getCounters()));
+ return jfe;
+ }
+
+ // Task-start has been moved out of InitTransition, so this arc simply
+ // hardcodes 0 for both map and reduce finished tasks.
+ private static class KillNewJobTransition
+ implements SingleArcTransition<JobImpl, JobEvent> {
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+ job.setFinishTime();
+ JobUnsuccessfulCompletionEvent failedEvent =
+ new JobUnsuccessfulCompletionEvent(job.oldJobId,
+ job.finishTime, 0, 0,
+ JobState.KILLED.toString());
+ job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
+ job.finished(JobState.KILLED);
+ }
+ }
+
+ private static class KillInitedJobTransition
+ implements SingleArcTransition<JobImpl, JobEvent> {
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+ job.abortJob(org.apache.hadoop.mapreduce.JobStatus.State.KILLED);
+ job.addDiagnostic("Job received Kill in INITED state.");
+ job.finished(JobState.KILLED);
+ }
+ }
+
+ private static class KillTasksTransition
+ implements SingleArcTransition<JobImpl, JobEvent> {
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+ job.addDiagnostic("Job received Kill while in RUNNING state.");
+ for (Task task : job.tasks.values()) {
+ job.eventHandler.handle(
+ new TaskEvent(task.getID(), TaskEventType.T_KILL));
+ }
+ job.metrics.endRunningJob(job);
+ }
+ }
+
+ private static class TaskAttemptCompletedEventTransition implements
+ SingleArcTransition<JobImpl, JobEvent> {
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+ TaskAttemptCompletionEvent tce =
+ ((JobTaskAttemptCompletedEvent) event).getCompletionEvent();
+ // Add the TaskAttemptCompletionEvent
+ //eventId is equal to index in the arraylist
+ tce.setEventId(job.taskAttemptCompletionEvents.size());
+ job.taskAttemptCompletionEvents.add(tce);
+
+      //mark the previous completion event obsolete, if it exists
+ Object successEventNo =
+ job.successAttemptCompletionEventNoMap.remove(tce.getAttemptId().getTaskId());
+ if (successEventNo != null) {
+ TaskAttemptCompletionEvent successEvent =
+ job.taskAttemptCompletionEvents.get((Integer) successEventNo);
+ successEvent.setStatus(TaskAttemptCompletionEventStatus.OBSOLETE);
+ }
+
+ if (TaskAttemptCompletionEventStatus.SUCCEEDED.equals(tce.getStatus())) {
+ job.successAttemptCompletionEventNoMap.put(tce.getAttemptId().getTaskId(),
+ tce.getEventId());
+ }
+ }
+ }
+
+ private static class TaskAttemptFetchFailureTransition implements
+ SingleArcTransition<JobImpl, JobEvent> {
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+ JobTaskAttemptFetchFailureEvent fetchfailureEvent =
+ (JobTaskAttemptFetchFailureEvent) event;
+ for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId mapId :
+ fetchfailureEvent.getMaps()) {
+ Integer fetchFailures = job.fetchFailuresMapping.get(mapId);
+ fetchFailures = (fetchFailures == null) ? 1 : (fetchFailures+1);
+ job.fetchFailuresMapping.put(mapId, fetchFailures);
+
+ //get number of running reduces
+ int runningReduceTasks = 0;
+ for (TaskId taskId : job.reduceTasks) {
+ if (TaskState.RUNNING.equals(job.tasks.get(taskId).getState())) {
+ runningReduceTasks++;
+ }
+ }
+
+ float failureRate = (float) fetchFailures / runningReduceTasks;
+ // declare faulty if fetch-failures >= max-allowed-failures
+ boolean isMapFaulty =
+ (failureRate >= MAX_ALLOWED_FETCH_FAILURES_FRACTION);
+ if (fetchFailures >= MAX_FETCH_FAILURES_NOTIFICATIONS && isMapFaulty) {
+ LOG.info("Too many fetch-failures for output of task attempt: " +
+ mapId + " ... raising fetch failure to map");
+ job.eventHandler.handle(new TaskAttemptEvent(mapId,
+ TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
+ job.fetchFailuresMapping.remove(mapId);
+ }
+ }
+ }
+ }
+
+ private static class TaskCompletedTransition implements
+ MultipleArcTransition<JobImpl, JobEvent, JobState> {
+
+ @Override
+ public JobState transition(JobImpl job, JobEvent event) {
+ job.completedTaskCount++;
+ LOG.info("Num completed Tasks: " + job.completedTaskCount);
+ JobTaskEvent taskEvent = (JobTaskEvent) event;
+ Task task = job.tasks.get(taskEvent.getTaskID());
+ if (taskEvent.getState() == TaskState.SUCCEEDED) {
+ taskSucceeded(job, task);
+ } else if (taskEvent.getState() == TaskState.FAILED) {
+ taskFailed(job, task);
+ } else if (taskEvent.getState() == TaskState.KILLED) {
+ taskKilled(job, task);
+ }
+
+ return checkJobForCompletion(job);
+ }
+
+ protected JobState checkJobForCompletion(JobImpl job) {
+ //check for Job failure
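+      // Integer arithmetic avoids rounding: failedMaps*100 > allowed*numMaps
+      // means "failure rate strictly above the allowed percent"; e.g. with
+      // 10 maps and a 0% allowance, a single failed map gives 100 > 0 and
+      // fails the job.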
+ if (job.failedMapTaskCount*100 >
+ job.allowedMapFailuresPercent*job.numMapTasks ||
+ job.failedReduceTaskCount*100 >
+ job.allowedReduceFailuresPercent*job.numReduceTasks) {
+ job.setFinishTime();
+
+ String diagnosticMsg = "Job failed as tasks failed. " +
+ "failedMaps:" + job.failedMapTaskCount +
+ " failedReduces:" + job.failedReduceTaskCount;
+ LOG.info(diagnosticMsg);
+ job.addDiagnostic(diagnosticMsg);
+ job.abortJob(org.apache.hadoop.mapreduce.JobStatus.State.FAILED);
+ return job.finished(JobState.FAILED);
+ }
+
+ JobState jobCompleteSuccess = JobImpl.checkJobCompleteSuccess(job);
+ if (jobCompleteSuccess != null) {
+ return jobCompleteSuccess;
+ }
+
+ //return the current state, Job not finished yet
+ return job.getState();
+ }
+
+ private void taskSucceeded(JobImpl job, Task task) {
+ if (task.getType() == TaskType.MAP) {
+ job.succeededMapTaskCount++;
+ } else {
+ job.succeededReduceTaskCount++;
+ }
+ job.metrics.completedTask(task);
+ }
+
+ private void taskFailed(JobImpl job, Task task) {
+ if (task.getType() == TaskType.MAP) {
+ job.failedMapTaskCount++;
+ } else if (task.getType() == TaskType.REDUCE) {
+ job.failedReduceTaskCount++;
+ }
+ job.addDiagnostic("Task failed " + task.getID());
+ job.metrics.failedTask(task);
+ }
+
+ private void taskKilled(JobImpl job, Task task) {
+ if (task.getType() == TaskType.MAP) {
+ job.killedMapTaskCount++;
+ } else if (task.getType() == TaskType.REDUCE) {
+ job.killedReduceTaskCount++;
+ }
+ job.metrics.killedTask(task);
+ }
+ }
+
+ // Transition class for handling jobs with no tasks
+ static class JobNoTasksCompletedTransition implements
+ MultipleArcTransition<JobImpl, JobEvent, JobState> {
+
+ @Override
+ public JobState transition(JobImpl job, JobEvent event) {
+ JobState jobCompleteSuccess = JobImpl.checkJobCompleteSuccess(job);
+ if (jobCompleteSuccess != null) {
+ return jobCompleteSuccess;
+ }
+
+ // Return the current state, Job not finished yet
+ return job.getState();
+ }
+ }
+
+ private static class MapTaskRescheduledTransition implements
+ SingleArcTransition<JobImpl, JobEvent> {
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+      //a succeeded map task has been restarted; roll back the counts
+ job.completedTaskCount--;
+ job.succeededMapTaskCount--;
+ }
+ }
+
+ private static class KillWaitTaskCompletedTransition extends
+ TaskCompletedTransition {
+ @Override
+ protected JobState checkJobForCompletion(JobImpl job) {
+ if (job.completedTaskCount == job.tasks.size()) {
+ job.setFinishTime();
+ job.abortJob(org.apache.hadoop.mapreduce.JobStatus.State.KILLED);
+ return job.finished(JobState.KILLED);
+ }
+ //return the current state, Job not finished yet
+ return job.getState();
+ }
+ }
+
+ private void addDiagnostic(String diag) {
+ diagnostics.add(diag);
+ }
+
+ private static class DiagnosticsUpdateTransition implements
+ SingleArcTransition<JobImpl, JobEvent> {
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+ job.addDiagnostic(((JobDiagnosticsUpdateEvent) event)
+ .getDiagnosticUpdate());
+ }
+ }
+
+ private static class CounterUpdateTransition implements
+ SingleArcTransition<JobImpl, JobEvent> {
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+ JobCounterUpdateEvent jce = (JobCounterUpdateEvent) event;
+ for (JobCounterUpdateEvent.CounterIncrementalUpdate ci : jce
+ .getCounterUpdates()) {
+ job.jobCounters.incrCounter(ci.getCounterKey(), ci.getIncrementValue());
+ }
+ }
+ }
+
+ private static class InternalErrorTransition implements
+ SingleArcTransition<JobImpl, JobEvent> {
+ @Override
+ public void transition(JobImpl job, JobEvent event) {
+      //TODO Is this JH event required?
+ job.setFinishTime();
+ JobUnsuccessfulCompletionEvent failedEvent =
+ new JobUnsuccessfulCompletionEvent(job.oldJobId,
+ job.finishTime, 0, 0,
+ JobState.ERROR.toString());
+ job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
+ job.finished(JobState.ERROR);
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/MapTaskImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/MapTaskImpl.java
new file mode 100644
index 0000000..119cc51
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/MapTaskImpl.java
@@ -0,0 +1,97 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.impl;
+
+import java.util.Collection;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.MapTaskAttemptImpl;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.event.EventHandler;
+
+
+public class MapTaskImpl extends TaskImpl {
+
+ private final TaskSplitMetaInfo taskSplitMetaInfo;
+
+ public MapTaskImpl(JobId jobId, int partition, EventHandler eventHandler,
+ Path remoteJobConfFile, Configuration conf,
+ TaskSplitMetaInfo taskSplitMetaInfo,
+ TaskAttemptListener taskAttemptListener, OutputCommitter committer,
+ Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock,
+ Set<TaskId> completedTasksFromPreviousRun, int startCount,
+ MRAppMetrics metrics) {
+ super(jobId, TaskType.MAP, partition, eventHandler, remoteJobConfFile,
+ conf, taskAttemptListener, committer, jobToken, fsTokens, clock,
+ completedTasksFromPreviousRun, startCount, metrics);
+ this.taskSplitMetaInfo = taskSplitMetaInfo;
+ }
+
+ @Override
+ protected int getMaxAttempts() {
+ return conf.getInt(MRJobConfig.MAP_MAX_ATTEMPTS, 4);
+ }
+
+ @Override
+ protected TaskAttemptImpl createAttempt() {
+ return new MapTaskAttemptImpl(getID(), nextAttemptNumber,
+ eventHandler, jobFile,
+ partition, taskSplitMetaInfo, conf, taskAttemptListener,
+ committer, jobToken, fsTokens, clock);
+ }
+
+ @Override
+ public TaskType getType() {
+ return TaskType.MAP;
+ }
+
+ protected TaskSplitMetaInfo getTaskSplitMetaInfo() {
+ return this.taskSplitMetaInfo;
+ }
+
+ /**
+ * @return a String formatted as a comma-separated list of splits.
+ */
+ @Override
+ protected String getSplitsAsString() {
+ String[] splits = getTaskSplitMetaInfo().getLocations();
+ if (splits == null || splits.length == 0)
+ return "";
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < splits.length; i++) {
+ if (i != 0) sb.append(",");
+ sb.append(splits[i]);
+ }
+ return sb.toString();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/ReduceTaskImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/ReduceTaskImpl.java
new file mode 100644
index 0000000..ae2e84a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/ReduceTaskImpl.java
@@ -0,0 +1,75 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.impl;
+
+import java.util.Collection;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.ReduceTaskAttemptImpl;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.event.EventHandler;
+
+public class ReduceTaskImpl extends TaskImpl {
+
+ private final int numMapTasks;
+
+ public ReduceTaskImpl(JobId jobId, int partition,
+ EventHandler eventHandler, Path jobFile, Configuration conf,
+ int numMapTasks, TaskAttemptListener taskAttemptListener,
+ OutputCommitter committer, Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock,
+ Set<TaskId> completedTasksFromPreviousRun, int startCount,
+ MRAppMetrics metrics) {
+ super(jobId, TaskType.REDUCE, partition, eventHandler, jobFile, conf,
+ taskAttemptListener, committer, jobToken, fsTokens, clock,
+ completedTasksFromPreviousRun, startCount, metrics);
+ this.numMapTasks = numMapTasks;
+ }
+
+ @Override
+ protected int getMaxAttempts() {
+ return conf.getInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 4);
+ }
+
+ @Override
+ protected TaskAttemptImpl createAttempt() {
+ return new ReduceTaskAttemptImpl(getID(), nextAttemptNumber,
+ eventHandler, jobFile,
+ partition, numMapTasks, conf, taskAttemptListener,
+ committer, jobToken, fsTokens, clock);
+ }
+
+ @Override
+ public TaskType getType() {
+ return TaskType.REDUCE;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
new file mode 100644
index 0000000..87e0e08
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -0,0 +1,1442 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.impl;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapReduceChildJVM;
+import org.apache.hadoop.mapred.ProgressSplitsBlock;
+import org.apache.hadoop.mapred.ShuffleHandler;
+import org.apache.hadoop.mapred.Task;
+import org.apache.hadoop.mapred.TaskAttemptContextImpl;
+import org.apache.hadoop.mapred.WrappedJvmID;
+import org.apache.hadoop.mapred.WrappedProgressSplitsBlock;
+import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskCounter;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.MapAttemptFinishedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinishedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptStartedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
+import org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.RackResolver;
+
+/**
+ * Implementation of TaskAttempt interface.
+ */
+@SuppressWarnings("all")
+public abstract class TaskAttemptImpl implements
+ org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt,
+ EventHandler<TaskAttemptEvent> {
+
+ private static final Log LOG = LogFactory.getLog(TaskAttemptImpl.class);
+ private static final long MEMORY_SPLITS_RESOLUTION = 1024; //TODO Make configurable?
+ private static final int MAP_MEMORY_MB_DEFAULT = 1024;
+ private static final int REDUCE_MEMORY_MB_DEFAULT = 1024;
+ private final static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ protected final Configuration conf;
+ protected final Path jobFile;
+ protected final int partition;
+ protected final EventHandler eventHandler;
+ private final TaskAttemptId attemptId;
+ private final Clock clock;
+ private final org.apache.hadoop.mapred.JobID oldJobId;
+ private final TaskAttemptListener taskAttemptListener;
+ private final OutputCommitter committer;
+ private final Resource resourceCapability;
+ private final String[] dataLocalHosts;
+ private final List<String> diagnostics = new ArrayList<String>();
+ private final Lock readLock;
+ private final Lock writeLock;
+ private Collection<Token<? extends TokenIdentifier>> fsTokens;
+ private Token<JobTokenIdentifier> jobToken;
+ private static AtomicBoolean initialClasspathFlag = new AtomicBoolean();
+ private static String initialClasspath = null;
+ private final Object classpathLock = new Object();
+ private long launchTime;
+ private long finishTime;
+ private WrappedProgressSplitsBlock progressSplitBlock;
+
+ private static final CleanupContainerTransition CLEANUP_CONTAINER_TRANSITION =
+ new CleanupContainerTransition();
+
+ private static final DiagnosticInformationUpdater
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION
+ = new DiagnosticInformationUpdater();
+
+ private static final StateMachineFactory
+ <TaskAttemptImpl, TaskAttemptState, TaskAttemptEventType, TaskAttemptEvent>
+ stateMachineFactory
+ = new StateMachineFactory
+ <TaskAttemptImpl, TaskAttemptState, TaskAttemptEventType, TaskAttemptEvent>
+ (TaskAttemptState.NEW)
+
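+  // Same builder pattern as JobImpl's job-level factory: arcs are grouped
+  // by pre-state, with the rough happy path
+  //   NEW -> UNASSIGNED -> ASSIGNED -> RUNNING
+  //       -> SUCCESS_CONTAINER_CLEANUP -> SUCCEEDED.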
+ // Transitions from the NEW state.
+ .addTransition(TaskAttemptState.NEW, TaskAttemptState.UNASSIGNED,
+ TaskAttemptEventType.TA_SCHEDULE, new RequestContainerTransition(false))
+ .addTransition(TaskAttemptState.NEW, TaskAttemptState.UNASSIGNED,
+ TaskAttemptEventType.TA_RESCHEDULE, new RequestContainerTransition(true))
+ .addTransition(TaskAttemptState.NEW, TaskAttemptState.KILLED,
+ TaskAttemptEventType.TA_KILL, new KilledTransition())
+ .addTransition(TaskAttemptState.NEW, TaskAttemptState.FAILED,
+ TaskAttemptEventType.TA_FAILMSG, new FailedTransition())
+
+ // Transitions from the UNASSIGNED state.
+ .addTransition(TaskAttemptState.UNASSIGNED,
+ TaskAttemptState.ASSIGNED, TaskAttemptEventType.TA_ASSIGNED,
+ new ContainerAssignedTransition())
+ .addTransition(TaskAttemptState.UNASSIGNED, TaskAttemptState.KILLED,
+ TaskAttemptEventType.TA_KILL, new DeallocateContainerTransition(
+ TaskAttemptState.KILLED, true))
+ .addTransition(TaskAttemptState.UNASSIGNED, TaskAttemptState.FAILED,
+ TaskAttemptEventType.TA_FAILMSG, new DeallocateContainerTransition(
+ TaskAttemptState.FAILED, true))
+
+ // Transitions from the ASSIGNED state.
+ .addTransition(TaskAttemptState.ASSIGNED, TaskAttemptState.RUNNING,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
+ new LaunchedContainerTransition())
+ .addTransition(TaskAttemptState.ASSIGNED, TaskAttemptState.ASSIGNED,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ .addTransition(TaskAttemptState.ASSIGNED, TaskAttemptState.FAILED,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
+ new DeallocateContainerTransition(TaskAttemptState.FAILED, false))
+ .addTransition(TaskAttemptState.ASSIGNED,
+ TaskAttemptState.KILL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_KILL, CLEANUP_CONTAINER_TRANSITION)
+ .addTransition(TaskAttemptState.ASSIGNED,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_FAILMSG, CLEANUP_CONTAINER_TRANSITION)
+
+ // Transitions from RUNNING state.
+ .addTransition(TaskAttemptState.RUNNING, TaskAttemptState.RUNNING,
+ TaskAttemptEventType.TA_UPDATE, new StatusUpdater())
+ .addTransition(TaskAttemptState.RUNNING, TaskAttemptState.RUNNING,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // If no commit is required, task directly goes to success
+ .addTransition(TaskAttemptState.RUNNING,
+ TaskAttemptState.SUCCESS_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_DONE, CLEANUP_CONTAINER_TRANSITION)
+ // If commit is required, task goes through commit pending state.
+ .addTransition(TaskAttemptState.RUNNING,
+ TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptEventType.TA_COMMIT_PENDING, new CommitPendingTransition())
+ // Failure handling while RUNNING
+ .addTransition(TaskAttemptState.RUNNING,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_FAILMSG, CLEANUP_CONTAINER_TRANSITION)
+ //for handling container exit without sending the done or fail msg
+ .addTransition(TaskAttemptState.RUNNING,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ CLEANUP_CONTAINER_TRANSITION)
+ // Timeout handling while RUNNING
+ .addTransition(TaskAttemptState.RUNNING,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_TIMED_OUT, CLEANUP_CONTAINER_TRANSITION)
+ // Kill handling
+ .addTransition(TaskAttemptState.RUNNING,
+ TaskAttemptState.KILL_CONTAINER_CLEANUP, TaskAttemptEventType.TA_KILL,
+ CLEANUP_CONTAINER_TRANSITION)
+
+ // Transitions from COMMIT_PENDING state
+ .addTransition(TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptState.COMMIT_PENDING, TaskAttemptEventType.TA_UPDATE,
+ new StatusUpdater())
+ .addTransition(TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ .addTransition(TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptState.SUCCESS_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_DONE, CLEANUP_CONTAINER_TRANSITION)
+ .addTransition(TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptState.KILL_CONTAINER_CLEANUP, TaskAttemptEventType.TA_KILL,
+ CLEANUP_CONTAINER_TRANSITION)
+ .addTransition(TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_FAILMSG, CLEANUP_CONTAINER_TRANSITION)
+ .addTransition(TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ CLEANUP_CONTAINER_TRANSITION)
+ .addTransition(TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_TIMED_OUT, CLEANUP_CONTAINER_TRANSITION)
+
+ // Transitions from SUCCESS_CONTAINER_CLEANUP state
+ // kill and cleanup the container
+ .addTransition(TaskAttemptState.SUCCESS_CONTAINER_CLEANUP,
+ TaskAttemptState.SUCCEEDED, TaskAttemptEventType.TA_CONTAINER_CLEANED,
+ new SucceededTransition())
+ .addTransition(
+ TaskAttemptState.SUCCESS_CONTAINER_CLEANUP,
+ TaskAttemptState.SUCCESS_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // Ignore-able events
+ .addTransition(TaskAttemptState.SUCCESS_CONTAINER_CLEANUP,
+ TaskAttemptState.SUCCESS_CONTAINER_CLEANUP,
+ EnumSet.of(TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_TIMED_OUT,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED))
+
+ // Transitions from FAIL_CONTAINER_CLEANUP state.
+ .addTransition(TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptState.FAIL_TASK_CLEANUP,
+ TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition())
+ .addTransition(TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // Ignore-able events
+ .addTransition(TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ EnumSet.of(TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ TaskAttemptEventType.TA_UPDATE,
+ TaskAttemptEventType.TA_COMMIT_PENDING,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
+ TaskAttemptEventType.TA_DONE,
+ TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_TIMED_OUT))
+
+ // Transitions from KILL_CONTAINER_CLEANUP
+ .addTransition(TaskAttemptState.KILL_CONTAINER_CLEANUP,
+ TaskAttemptState.KILL_TASK_CLEANUP,
+ TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition())
+ .addTransition(TaskAttemptState.KILL_CONTAINER_CLEANUP,
+ TaskAttemptState.KILL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // Ignore-able events
+ .addTransition(
+ TaskAttemptState.KILL_CONTAINER_CLEANUP,
+ TaskAttemptState.KILL_CONTAINER_CLEANUP,
+ EnumSet.of(TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ TaskAttemptEventType.TA_UPDATE,
+ TaskAttemptEventType.TA_COMMIT_PENDING,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
+ TaskAttemptEventType.TA_DONE,
+ TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_TIMED_OUT))
+
+ // Transitions from FAIL_TASK_CLEANUP
+ // run the task cleanup
+ .addTransition(TaskAttemptState.FAIL_TASK_CLEANUP,
+ TaskAttemptState.FAILED, TaskAttemptEventType.TA_CLEANUP_DONE,
+ new FailedTransition())
+ .addTransition(TaskAttemptState.FAIL_TASK_CLEANUP,
+ TaskAttemptState.FAIL_TASK_CLEANUP,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // Ignore-able events
+ .addTransition(TaskAttemptState.FAIL_TASK_CLEANUP,
+ TaskAttemptState.FAIL_TASK_CLEANUP,
+ EnumSet.of(TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ TaskAttemptEventType.TA_UPDATE,
+ TaskAttemptEventType.TA_COMMIT_PENDING,
+ TaskAttemptEventType.TA_DONE,
+ TaskAttemptEventType.TA_FAILMSG))
+
+ // Transitions from KILL_TASK_CLEANUP
+ .addTransition(TaskAttemptState.KILL_TASK_CLEANUP,
+ TaskAttemptState.KILLED, TaskAttemptEventType.TA_CLEANUP_DONE,
+ new KilledTransition())
+ .addTransition(TaskAttemptState.KILL_TASK_CLEANUP,
+ TaskAttemptState.KILL_TASK_CLEANUP,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // Ignore-able events
+ .addTransition(TaskAttemptState.KILL_TASK_CLEANUP,
+ TaskAttemptState.KILL_TASK_CLEANUP,
+ EnumSet.of(TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ TaskAttemptEventType.TA_UPDATE,
+ TaskAttemptEventType.TA_COMMIT_PENDING,
+ TaskAttemptEventType.TA_DONE,
+ TaskAttemptEventType.TA_FAILMSG))
+
+ // Transitions from SUCCEEDED
+ .addTransition(TaskAttemptState.SUCCEEDED, //only possible for map attempts
+ TaskAttemptState.FAILED,
+ TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE,
+ new TooManyFetchFailureTransition())
+ .addTransition(
+ TaskAttemptState.SUCCEEDED, TaskAttemptState.SUCCEEDED,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // Ignore-able events for SUCCEEDED state
+ .addTransition(TaskAttemptState.SUCCEEDED,
+ TaskAttemptState.SUCCEEDED,
+ EnumSet.of(TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED))
+
+ // Transitions from FAILED state
+ .addTransition(TaskAttemptState.FAILED, TaskAttemptState.FAILED,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // Ignore-able events for FAILED state
+ .addTransition(TaskAttemptState.FAILED, TaskAttemptState.FAILED,
+ EnumSet.of(TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_ASSIGNED,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ TaskAttemptEventType.TA_UPDATE,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
+ TaskAttemptEventType.TA_COMMIT_PENDING,
+ TaskAttemptEventType.TA_DONE,
+ TaskAttemptEventType.TA_FAILMSG))
+
+ // Transitions from KILLED state
+ .addTransition(TaskAttemptState.KILLED, TaskAttemptState.KILLED,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // Ignore-able events for KILLED state
+ .addTransition(TaskAttemptState.KILLED, TaskAttemptState.KILLED,
+ EnumSet.of(TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_ASSIGNED,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ TaskAttemptEventType.TA_UPDATE,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
+ TaskAttemptEventType.TA_COMMIT_PENDING,
+ TaskAttemptEventType.TA_DONE,
+ TaskAttemptEventType.TA_FAILMSG))
+
+ // create the topology tables
+ .installTopology();
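+  // Summary of the happy path encoded above: NEW -> UNASSIGNED (container
+  // requested) -> ASSIGNED (container granted) -> RUNNING (container
+  // launched) -> SUCCESS_CONTAINER_CLEANUP (TA_DONE) -> SUCCEEDED.
+  // Failure or kill at any point detours through the corresponding
+  // *_CONTAINER_CLEANUP and *_TASK_CLEANUP states.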
+
+ private final StateMachine
+ <TaskAttemptState, TaskAttemptEventType, TaskAttemptEvent>
+ stateMachine;
+
+ private ContainerId containerID;
+ private String nodeHostName;
+ private String containerMgrAddress;
+ private String nodeHttpAddress;
+ private WrappedJvmID jvmID;
+ private ContainerToken containerToken;
+ private Resource assignedCapability;
+
+  //this takes a good amount of memory (~30KB). Instantiate it lazily
+  //and set it to null once the task is launched, to free the memory.
+ private org.apache.hadoop.mapred.Task remoteTask;
+
+ //this is the last status reported by the REMOTE running attempt
+ private TaskAttemptStatus reportedStatus;
+
+ public TaskAttemptImpl(TaskId taskId, int i, EventHandler eventHandler,
+ TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
+ Configuration conf, String[] dataLocalHosts, OutputCommitter committer,
+ Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
+ oldJobId = TypeConverter.fromYarn(taskId.getJobId());
+ this.conf = conf;
+ this.clock = clock;
+ attemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
+ attemptId.setTaskId(taskId);
+ attemptId.setId(i);
+ this.taskAttemptListener = taskAttemptListener;
+
+ // Initialize reportedStatus
+ reportedStatus = new TaskAttemptStatus();
+ initTaskAttemptStatus(reportedStatus);
+
+ ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+ readLock = readWriteLock.readLock();
+ writeLock = readWriteLock.writeLock();
+
+ this.fsTokens = fsTokens;
+ this.jobToken = jobToken;
+ this.eventHandler = eventHandler;
+ this.committer = committer;
+ this.jobFile = jobFile;
+ this.partition = partition;
+
+ //TODO:create the resource reqt for this Task attempt
+ this.resourceCapability = recordFactory.newRecordInstance(Resource.class);
+ this.resourceCapability.setMemory(getMemoryRequired(conf, taskId.getTaskType()));
+ this.dataLocalHosts = dataLocalHosts;
+ RackResolver.init(conf);
+
+ // This "this leak" is okay because the retained pointer is in an
+ // instance variable.
+ stateMachine = stateMachineFactory.make(this);
+ }
+
+ private int getMemoryRequired(Configuration conf, TaskType taskType) {
+ int memory = 1024;
+ if (taskType == TaskType.MAP) {
+ memory = conf.getInt(MRJobConfig.MAP_MEMORY_MB, MAP_MEMORY_MB_DEFAULT);
+ } else if (taskType == TaskType.REDUCE) {
+ memory = conf.getInt(MRJobConfig.REDUCE_MEMORY_MB, REDUCE_MEMORY_MB_DEFAULT);
+ }
+
+ return memory;
+ }
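+
+  // Illustrative: with MRJobConfig.MAP_MEMORY_MB (mapreduce.map.memory.mb in
+  // current trunk; key name assumed here) set to 2048, each map attempt asks
+  // for a 2048MB container; when unset, it falls back to the 1024MB default.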
+
+ /**
+ * Create a {@link LocalResource} record with all the given parameters.
+ * TODO: This should pave way for Builder pattern.
+ */
+ private static LocalResource createLocalResource(FileSystem fc,
+ RecordFactory recordFactory, Path file, LocalResourceType type,
+ LocalResourceVisibility visibility) throws IOException {
+ FileStatus fstat = fc.getFileStatus(file);
+ LocalResource resource =
+ recordFactory.newRecordInstance(LocalResource.class);
+ resource.setResource(ConverterUtils.getYarnUrlFromPath(fc.resolvePath(fstat
+ .getPath())));
+ resource.setType(type);
+ resource.setVisibility(visibility);
+ resource.setSize(fstat.getLen());
+ resource.setTimestamp(fstat.getModificationTime());
+ return resource;
+ }
+
+  /**
+   * Locks on classpathLock so that there is only one fork in the AM for
+   * getting the initial class-path. TODO: This should go away once we
+   * construct a parent CLC and use it for all the containers.
+   */
+ private String getInitialClasspath() throws IOException {
+ synchronized (classpathLock) {
+ if (initialClasspathFlag.get()) {
+ return initialClasspath;
+ }
+ Map<String, String> env = new HashMap<String, String>();
+ MRApps.setInitialClasspath(env);
+ initialClasspath = env.get(MRApps.CLASSPATH);
+ initialClasspathFlag.set(true);
+ return initialClasspath;
+ }
+ }
+
+
+ /**
+ * Create the {@link ContainerLaunchContext} for this attempt.
+ */
+ private ContainerLaunchContext createContainerLaunchContext() {
+
+ ContainerLaunchContext container =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ try {
+ FileSystem remoteFS = FileSystem.get(conf);
+
+ // //////////// Set up JobJar to be localized properly on the remote NM.
+ if (conf.get(MRJobConfig.JAR) != null) {
+ Path remoteJobJar = (new Path(remoteTask.getConf().get(
+ MRJobConfig.JAR))).makeQualified(remoteFS.getUri(),
+ remoteFS.getWorkingDirectory());
+ container.setLocalResource(
+ MRConstants.JOB_JAR,
+ createLocalResource(remoteFS, recordFactory, remoteJobJar,
+ LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
+ LOG.info("The job-jar file on the remote FS is "
+ + remoteJobJar.toUri().toASCIIString());
+ } else {
+        // Job jar may be null. For example, for pipes, the job jar is the
+        // Hadoop MapReduce jar itself, which is already on the classpath.
+ LOG.info("Job jar is not present. "
+ + "Not adding any jar to the list of resources.");
+ }
+ // //////////// End of JobJar setup
+
+ // //////////// Set up JobConf to be localized properly on the remote NM.
+ Path path =
+ MRApps.getStagingAreaDir(conf, UserGroupInformation
+ .getCurrentUser().getShortUserName());
+ Path remoteJobSubmitDir =
+ new Path(path, oldJobId.toString());
+ Path remoteJobConfPath =
+ new Path(remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
+ container.setLocalResource(
+ MRConstants.JOB_CONF_FILE,
+ createLocalResource(remoteFS, recordFactory, remoteJobConfPath,
+ LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
+ LOG.info("The job-conf file on the remote FS is "
+ + remoteJobConfPath.toUri().toASCIIString());
+ // //////////// End of JobConf setup
+
+ // Setup DistributedCache
+ setupDistributedCache(remoteFS, conf, container);
+
+ // Setup up tokens
+ Credentials taskCredentials = new Credentials();
+
+ if (UserGroupInformation.isSecurityEnabled()) {
+ // Add file-system tokens
+ for (Token<? extends TokenIdentifier> token : fsTokens) {
+ LOG.info("Putting fs-token for NM use for launching container : "
+ + token.toString());
+ taskCredentials.addToken(token.getService(), token);
+ }
+ }
+
+ // LocalStorageToken is needed irrespective of whether security is enabled
+ // or not.
+ TokenCache.setJobToken(jobToken, taskCredentials);
+
+ DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
+ LOG.info("Size of containertokens_dob is "
+ + taskCredentials.numberOfTokens());
+ taskCredentials.writeTokenStorageToStream(containerTokens_dob);
+ container.setContainerTokens(
+ ByteBuffer.wrap(containerTokens_dob.getData(), 0,
+ containerTokens_dob.getLength()));
+
+ // Add shuffle token
+ LOG.info("Putting shuffle token in serviceData");
+ DataOutputBuffer jobToken_dob = new DataOutputBuffer();
+ jobToken.write(jobToken_dob);
+ container
+ .setServiceData(
+ ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
+ ByteBuffer.wrap(jobToken_dob.getData(), 0,
+ jobToken_dob.getLength()));
+
+ MRApps.addToClassPath(container.getAllEnv(), getInitialClasspath());
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+
+ container.setContainerId(containerID);
+ container.setUser(conf.get(MRJobConfig.USER_NAME)); // TODO: Fix
+
+ File workDir = new File("$PWD"); // Will be expanded by the shell.
+ String containerLogDir =
+ new File(ApplicationConstants.LOG_DIR_EXPANSION_VAR).toString();
+ String childTmpDir = new File(workDir, "tmp").toString();
+ String javaHome = "${JAVA_HOME}"; // Will be expanded by the shell.
+ String nmLdLibraryPath = "{LD_LIBRARY_PATH}"; // Expanded by the shell?
+ List<String> classPaths = new ArrayList<String>();
+
+ String localizedApplicationTokensFile =
+ new File(workDir, MRConstants.APPLICATION_TOKENS_FILE).toString();
+ classPaths.add(MRConstants.JOB_JAR);
+ classPaths.add(MRConstants.YARN_MAPREDUCE_APP_JAR_PATH);
+ classPaths.add(workDir.toString()); // TODO
+
+ // Construct the actual Container
+ container.addAllCommands(MapReduceChildJVM.getVMCommand(
+ taskAttemptListener.getAddress(), remoteTask, javaHome,
+ workDir.toString(), containerLogDir, childTmpDir, jvmID));
+
+ MapReduceChildJVM.setVMEnv(container.getAllEnv(), classPaths,
+ workDir.toString(), containerLogDir, nmLdLibraryPath, remoteTask,
+ localizedApplicationTokensFile);
+
+ container.setResource(assignedCapability);
+ return container;
+ }
+
+ private static long[] parseTimeStamps(String[] strs) {
+ if (null == strs) {
+ return null;
+ }
+ long[] result = new long[strs.length];
+ for(int i=0; i < strs.length; ++i) {
+ result[i] = Long.parseLong(strs[i]);
+ }
+ return result;
+ }
+
+ private void setupDistributedCache(FileSystem remoteFS, Configuration conf,
+ ContainerLaunchContext container) throws IOException {
+
+ // Cache archives
+ parseDistributedCacheArtifacts(remoteFS, container, LocalResourceType.ARCHIVE,
+ DistributedCache.getCacheArchives(conf),
+ parseTimeStamps(DistributedCache.getArchiveTimestamps(conf)),
+ getFileSizes(conf, MRJobConfig.CACHE_ARCHIVES_SIZES),
+ DistributedCache.getArchiveVisibilities(conf),
+ DistributedCache.getArchiveClassPaths(conf));
+
+ // Cache files
+ parseDistributedCacheArtifacts(remoteFS, container, LocalResourceType.FILE,
+ DistributedCache.getCacheFiles(conf),
+ parseTimeStamps(DistributedCache.getFileTimestamps(conf)),
+ getFileSizes(conf, MRJobConfig.CACHE_FILES_SIZES),
+ DistributedCache.getFileVisibilities(conf),
+ DistributedCache.getFileClassPaths(conf));
+ }
+
+ // TODO - Move this to MR!
+ // Use TaskDistributedCacheManager.CacheFiles.makeCacheFiles(URI[],
+ // long[], boolean[], Path[], FileType)
+ private void parseDistributedCacheArtifacts(
+ FileSystem remoteFS, ContainerLaunchContext container, LocalResourceType type,
+      URI[] uris, long[] timestamps, long[] sizes, boolean[] visibilities,
+ Path[] pathsToPutOnClasspath) throws IOException {
+
+ if (uris != null) {
+ // Sanity check
+ if ((uris.length != timestamps.length) || (uris.length != sizes.length) ||
+ (uris.length != visibilities.length)) {
+ throw new IllegalArgumentException("Invalid specification for " +
+ "distributed-cache artifacts of type " + type + " :" +
+ " #uris=" + uris.length +
+ " #timestamps=" + timestamps.length +
+ " #visibilities=" + visibilities.length
+ );
+ }
+
+ Map<String, Path> classPaths = new HashMap<String, Path>();
+ if (pathsToPutOnClasspath != null) {
+ for (Path p : pathsToPutOnClasspath) {
+ p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
+ remoteFS.getWorkingDirectory()));
+ classPaths.put(p.toUri().getPath().toString(), p);
+ }
+ }
+ for (int i = 0; i < uris.length; ++i) {
+ URI u = uris[i];
+ Path p = new Path(u);
+ p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
+ remoteFS.getWorkingDirectory()));
+ // Add URI fragment or just the filename
+ Path name = new Path((null == u.getFragment())
+ ? p.getName()
+ : u.getFragment());
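+        // Illustrative: a cache URI such as hdfs://nn/cache/data.txt#alias
+        // localizes under the link name "alias"; with no fragment, the link
+        // name is just "data.txt".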
+ if (name.isAbsolute()) {
+ throw new IllegalArgumentException("Resource name must be relative");
+ }
+ String linkName = name.toUri().getPath();
+ container.setLocalResource(
+ linkName,
+ BuilderUtils.newLocalResource(recordFactory,
+ p.toUri(), type,
+ visibilities[i]
+ ? LocalResourceVisibility.PUBLIC
+ : LocalResourceVisibility.PRIVATE,
+ sizes[i], timestamps[i])
+ );
+ if (classPaths.containsKey(u.getPath())) {
+ Map<String, String> environment = container.getAllEnv();
+ MRApps.addToClassPath(environment, linkName);
+ }
+ }
+ }
+ }
+
+ // TODO - Move this to MR!
+ private static long[] getFileSizes(Configuration conf, String key) {
+ String[] strs = conf.getStrings(key);
+ if (strs == null) {
+ return null;
+ }
+ long[] result = new long[strs.length];
+ for(int i=0; i < strs.length; ++i) {
+ result[i] = Long.parseLong(strs[i]);
+ }
+ return result;
+ }
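+
+  // Note: the parsing loop here mirrors parseTimeStamps() above; both turn a
+  // list of decimal longs into a long[]. A shared helper could serve both.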
+
+ @Override
+ public ContainerId getAssignedContainerID() {
+ readLock.lock();
+ try {
+ return containerID;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getAssignedContainerMgrAddress() {
+ readLock.lock();
+ try {
+ return containerMgrAddress;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public long getLaunchTime() {
+ readLock.lock();
+ try {
+ return launchTime;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public long getFinishTime() {
+ readLock.lock();
+ try {
+ return finishTime;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+  /** If a container is assigned, return that node's HTTP address; otherwise
+   * null.
+   */
+ @Override
+ public String getNodeHttpAddress() {
+ readLock.lock();
+ try {
+ return nodeHttpAddress;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ protected abstract org.apache.hadoop.mapred.Task createRemoteTask();
+
+ @Override
+ public TaskAttemptId getID() {
+ return attemptId;
+ }
+
+ @Override
+ public boolean isFinished() {
+ readLock.lock();
+ try {
+ // TODO: Use stateMachine level method?
+ return (getState() == TaskAttemptState.SUCCEEDED ||
+ getState() == TaskAttemptState.FAILED ||
+ getState() == TaskAttemptState.KILLED);
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public TaskAttemptReport getReport() {
+ TaskAttemptReport result = recordFactory.newRecordInstance(TaskAttemptReport.class);
+ readLock.lock();
+ try {
+ result.setTaskAttemptId(attemptId);
+ //take the LOCAL state of attempt
+ //DO NOT take from reportedStatus
+
+ result.setTaskAttemptState(getState());
+ result.setProgress(reportedStatus.progress);
+ result.setStartTime(launchTime);
+ result.setFinishTime(finishTime);
+ result.setDiagnosticInfo(reportedStatus.diagnosticInfo);
+ result.setPhase(reportedStatus.phase);
+ result.setStateString(reportedStatus.stateString);
+ result.setCounters(getCounters());
+ return result;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public List<String> getDiagnostics() {
+ List<String> result = new ArrayList<String>();
+ readLock.lock();
+ try {
+ result.addAll(diagnostics);
+ return result;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public Counters getCounters() {
+ readLock.lock();
+ try {
+ Counters counters = reportedStatus.counters;
+ if (counters == null) {
+ counters = recordFactory.newRecordInstance(Counters.class);
+// counters.groups = new HashMap<String, CounterGroup>();
+ }
+ return counters;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public float getProgress() {
+ readLock.lock();
+ try {
+ return reportedStatus.progress;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public TaskAttemptState getState() {
+ readLock.lock();
+ try {
+ return stateMachine.getCurrentState();
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public void handle(TaskAttemptEvent event) {
+ LOG.info("Processing " + event.getTaskAttemptID() +
+ " of type " + event.getType());
+ writeLock.lock();
+ try {
+ final TaskAttemptState oldState = getState();
+ try {
+ stateMachine.doTransition(event.getType(), event);
+ } catch (InvalidStateTransitonException e) {
+ LOG.error("Can't handle this event at current state", e);
+ eventHandler.handle(new JobDiagnosticsUpdateEvent(
+ this.attemptId.getTaskId().getJobId(), "Invalid event " + event.getType() +
+ " on TaskAttempt " + this.attemptId));
+ eventHandler.handle(new JobEvent(this.attemptId.getTaskId().getJobId(),
+ JobEventType.INTERNAL_ERROR));
+ }
+ if (oldState != getState()) {
+ LOG.info(attemptId + " TaskAttempt Transitioned from "
+ + oldState + " to "
+ + getState());
+ }
+ } finally {
+ writeLock.unlock();
+ }
+ }
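+
+  // Usage sketch (illustrative): the AM event dispatcher delivers events
+  // such as
+  //   attempt.handle(new TaskAttemptEvent(attemptId,
+  //       TaskAttemptEventType.TA_SCHEDULE));
+  // which fires the NEW -> UNASSIGNED arc registered in stateMachineFactory
+  // above.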
+
+ //always called in write lock
+ private void setFinishTime() {
+ //set the finish time only if launch time is set
+ if (launchTime != 0) {
+ finishTime = clock.getTime();
+ }
+ }
+
+ private static long computeSlotMillis(TaskAttemptImpl taskAttempt) {
+ TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
+ int slotMemoryReq =
+ taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
+ int simSlotsRequired =
+ slotMemoryReq
+ / (taskType == TaskType.MAP ? MAP_MEMORY_MB_DEFAULT
+ : REDUCE_MEMORY_MB_DEFAULT);
+    // Simulating MRv1 slots for counters by assuming *_MEMORY_MB_DEFAULT
+    // corresponds to an MRv1 slot.
+    // Fallow slot millis is not applicable in MRv2, since a container is
+    // either assigned the full requested memory or not assigned at all; there
+    // are no partial reservations.
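+    // Worked example (illustrative): a reduce attempt that requested 2048MB
+    // counts as 2048/1024 = 2 simulated slots, so 60s of runtime contributes
+    // 120,000 slot-millis to SLOTS_MILLIS_REDUCES.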
+ long slotMillisIncrement =
+ simSlotsRequired
+ * (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
+ return slotMillisIncrement;
+ }
+
+ private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
+ TaskAttemptImpl taskAttempt) {
+ TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
+ JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
+
+ long slotMillisIncrement = computeSlotMillis(taskAttempt);
+
+ if (taskType == TaskType.MAP) {
+ jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
+ jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
+ } else {
+ jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
+ jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
+ }
+ return jce;
+ }
+
+ private static TaskAttemptUnsuccessfulCompletionEvent createTaskAttemptUnsuccessfulCompletionEvent(
+ TaskAttemptImpl taskAttempt, TaskAttemptState attemptState) {
+ TaskAttemptUnsuccessfulCompletionEvent tauce = new TaskAttemptUnsuccessfulCompletionEvent(
+ TypeConverter.fromYarn(taskAttempt.attemptId),
+ TypeConverter.fromYarn(taskAttempt.attemptId.getTaskId().getTaskType()),
+ attemptState.toString(), taskAttempt.finishTime,
+ taskAttempt.nodeHostName == null ? "UNKNOWN" : taskAttempt.nodeHostName,
+ taskAttempt.reportedStatus.diagnosticInfo.toString(),
+ taskAttempt.getProgressSplitBlock().burst());
+ return tauce;
+ }
+
+ private WrappedProgressSplitsBlock getProgressSplitBlock() {
+ readLock.lock();
+ try {
+ if (progressSplitBlock == null) {
+ progressSplitBlock = new WrappedProgressSplitsBlock(conf.getInt(
+ JHConfig.JOBHISTORY_TASKPROGRESS_NUMBER_SPLITS_KEY,
+ WrappedProgressSplitsBlock.DEFAULT_NUMBER_PROGRESS_SPLITS));
+ }
+ return progressSplitBlock;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ private void updateProgressSplits() {
+ double newProgress = reportedStatus.progress;
+ Counters counters = reportedStatus.counters;
+ if (counters == null)
+ return;
+
+ WrappedProgressSplitsBlock splitsBlock = getProgressSplitBlock();
+ if (splitsBlock != null) {
+ long now = clock.getTime();
+ long start = getLaunchTime(); // TODO Ensure not 0
+
+ if (start != 0 && now - start <= Integer.MAX_VALUE) {
+ splitsBlock.getProgressWallclockTime().extend(newProgress,
+ (int) (now - start));
+ }
+
+ Counter cpuCounter = counters.getCounter(
+ TaskCounter.CPU_MILLISECONDS);
+ if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
+ splitsBlock.getProgressCPUTime().extend(newProgress,
+ (int) cpuCounter.getValue());
+ }
+
+ Counter virtualBytes = counters.getCounter(
+ TaskCounter.VIRTUAL_MEMORY_BYTES);
+ if (virtualBytes != null) {
+ splitsBlock.getProgressVirtualMemoryKbytes().extend(newProgress,
+ (int) (virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
+ }
+
+ Counter physicalBytes = counters.getCounter(
+ TaskCounter.PHYSICAL_MEMORY_BYTES);
+ if (physicalBytes != null) {
+ splitsBlock.getProgressPhysicalMemoryKbytes().extend(newProgress,
+ (int) (physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
+ }
+ }
+ }
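+
+  // The splits block down-samples each metric against progress, so job
+  // history can later show (for example) how much wallclock or CPU time was
+  // consumed in each progress interval; the bucket count comes from
+  // JOBHISTORY_TASKPROGRESS_NUMBER_SPLITS_KEY above.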
+
+ private static class RequestContainerTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ private final boolean rescheduled;
+ public RequestContainerTransition(boolean rescheduled) {
+ this.rescheduled = rescheduled;
+ }
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ // Tell any speculator that we're requesting a container
+ taskAttempt.eventHandler.handle
+ (new SpeculatorEvent(taskAttempt.getID().getTaskId(), +1));
+ //request for container
+ if (rescheduled) {
+ taskAttempt.eventHandler.handle(
+ ContainerRequestEvent.createContainerRequestEventForFailedContainer(
+ taskAttempt.attemptId,
+ taskAttempt.resourceCapability));
+ } else {
+ int i = 0;
+ String[] racks = new String[taskAttempt.dataLocalHosts.length];
+ for (String host : taskAttempt.dataLocalHosts) {
+ racks[i++] = RackResolver.resolve(host).getNetworkLocation();
+ }
+ taskAttempt.eventHandler.handle(
+ new ContainerRequestEvent(taskAttempt.attemptId,
+ taskAttempt.resourceCapability,
+ taskAttempt.dataLocalHosts, racks));
+ }
+ }
+ }
+
+ private static class ContainerAssignedTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(final TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ TaskAttemptContainerAssignedEvent cEvent =
+ (TaskAttemptContainerAssignedEvent) event;
+ taskAttempt.containerID = cEvent.getContainer().getId();
+ taskAttempt.nodeHostName = cEvent.getContainer().getNodeId().getHost();
+ taskAttempt.containerMgrAddress = cEvent.getContainer().getNodeId()
+ .toString();
+ taskAttempt.nodeHttpAddress = cEvent.getContainer().getNodeHttpAddress();
+ taskAttempt.containerToken = cEvent.getContainer().getContainerToken();
+ taskAttempt.assignedCapability = cEvent.getContainer().getResource();
+ // this is a _real_ Task (classic Hadoop mapred flavor):
+ taskAttempt.remoteTask = taskAttempt.createRemoteTask();
+ taskAttempt.jvmID = new WrappedJvmID(
+ taskAttempt.remoteTask.getTaskID().getJobID(),
+ taskAttempt.remoteTask.isMapTask(), taskAttempt.containerID.getId());
+
+ //launch the container
+ //create the container object to be launched for a given Task attempt
+ taskAttempt.eventHandler.handle(
+ new ContainerRemoteLaunchEvent(taskAttempt.attemptId,
+ taskAttempt.containerID,
+ taskAttempt.containerMgrAddress, taskAttempt.containerToken) {
+ @Override
+ public ContainerLaunchContext getContainer() {
+ return taskAttempt.createContainerLaunchContext();
+ }
+ @Override
+ public Task getRemoteTask() { // classic mapred Task, not YARN version
+ return taskAttempt.remoteTask;
+ }
+ });
+
+ // send event to speculator that our container needs are satisfied
+ taskAttempt.eventHandler.handle
+ (new SpeculatorEvent(taskAttempt.getID().getTaskId(), -1));
+ }
+ }
+
+ private static class DeallocateContainerTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ private final TaskAttemptState finalState;
+ private final boolean withdrawsContainerRequest;
+ DeallocateContainerTransition
+ (TaskAttemptState finalState, boolean withdrawsContainerRequest) {
+ this.finalState = finalState;
+ this.withdrawsContainerRequest = withdrawsContainerRequest;
+ }
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ //set the finish time
+ taskAttempt.setFinishTime();
+ //send the deallocate event to ContainerAllocator
+ taskAttempt.eventHandler.handle(
+ new ContainerAllocatorEvent(taskAttempt.attemptId,
+ ContainerAllocator.EventType.CONTAINER_DEALLOCATE));
+
+ // send event to speculator that we withdraw our container needs, if
+ // we're transitioning out of UNASSIGNED
+ if (withdrawsContainerRequest) {
+ taskAttempt.eventHandler.handle
+ (new SpeculatorEvent(taskAttempt.getID().getTaskId(), -1));
+ }
+
+ switch(finalState) {
+ case FAILED:
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId,
+ TaskEventType.T_ATTEMPT_FAILED));
+ break;
+ case KILLED:
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId,
+ TaskEventType.T_ATTEMPT_KILLED));
+ break;
+ }
+ if (taskAttempt.getLaunchTime() != 0) {
+ TaskAttemptUnsuccessfulCompletionEvent tauce =
+ createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
+ finalState);
+ taskAttempt.eventHandler
+ .handle(createJobCounterUpdateEventTAFailed(taskAttempt));
+ taskAttempt.eventHandler.handle(new JobHistoryEvent(
+ taskAttempt.attemptId.getTaskId().getJobId(), tauce));
+ } else {
+ LOG.debug("Not generating HistoryFinish event since start event not generated for taskAttempt: "
+ + taskAttempt.getID());
+ }
+ }
+ }
+
+ private static class LaunchedContainerTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ //set the launch time
+ taskAttempt.launchTime = taskAttempt.clock.getTime();
+      // register it with TaskAttemptListener so that it starts listening
+      // for it
+ taskAttempt.taskAttemptListener.register(
+ taskAttempt.attemptId, taskAttempt.remoteTask, taskAttempt.jvmID);
+ //TODO Resolve to host / IP in case of a local address.
+ InetSocketAddress nodeHttpInetAddr =
+ NetUtils.createSocketAddr(taskAttempt.nodeHttpAddress); // TODO:
+ // Costly?
+ JobCounterUpdateEvent jce =
+ new JobCounterUpdateEvent(taskAttempt.attemptId.getTaskId()
+ .getJobId());
+ jce.addCounterUpdate(
+ taskAttempt.attemptId.getTaskId().getTaskType() == TaskType.MAP ?
+ JobCounter.TOTAL_LAUNCHED_MAPS: JobCounter.TOTAL_LAUNCHED_REDUCES
+ , 1);
+ taskAttempt.eventHandler.handle(jce);
+
+ TaskAttemptStartedEvent tase =
+ new TaskAttemptStartedEvent(TypeConverter.fromYarn(taskAttempt.attemptId),
+ TypeConverter.fromYarn(taskAttempt.attemptId.getTaskId().getTaskType()),
+ taskAttempt.launchTime,
+ nodeHttpInetAddr.getHostName(), nodeHttpInetAddr.getPort());
+ taskAttempt.eventHandler.handle
+ (new JobHistoryEvent(taskAttempt.attemptId.getTaskId().getJobId(), tase));
+ taskAttempt.eventHandler.handle
+ (new SpeculatorEvent
+ (taskAttempt.attemptId, true, taskAttempt.clock.getTime()));
+      //set remoteTask to null as it is no longer needed,
+      //freeing up the memory
+ taskAttempt.remoteTask = null;
+
+      //tell the Task that the attempt has started
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId,
+ TaskEventType.T_ATTEMPT_LAUNCHED));
+ }
+ }
+
+ private static class CommitPendingTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId,
+ TaskEventType.T_ATTEMPT_COMMIT_PENDING));
+ }
+ }
+
+ private static class TaskCleanupTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ TaskAttemptContext taskContext =
+ new TaskAttemptContextImpl(new JobConf(taskAttempt.conf),
+ TypeConverter.fromYarn(taskAttempt.attemptId));
+ taskAttempt.eventHandler.handle(new TaskCleanupEvent(
+ taskAttempt.attemptId,
+ taskAttempt.committer,
+ taskContext));
+ }
+ }
+
+ private static class SucceededTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ //set the finish time
+ taskAttempt.setFinishTime();
+ String taskType =
+ TypeConverter.fromYarn(taskAttempt.attemptId.getTaskId().getTaskType()).toString();
+ LOG.info("In TaskAttemptImpl taskType: " + taskType);
+ long slotMillis = computeSlotMillis(taskAttempt);
+ JobCounterUpdateEvent jce =
+ new JobCounterUpdateEvent(taskAttempt.attemptId.getTaskId()
+ .getJobId());
+ jce.addCounterUpdate(
+ taskAttempt.attemptId.getTaskId().getTaskType() == TaskType.MAP ?
+ JobCounter.SLOTS_MILLIS_MAPS : JobCounter.SLOTS_MILLIS_REDUCES,
+ slotMillis);
+ taskAttempt.eventHandler.handle(jce);
+ taskAttempt.logAttemptFinishedEvent(TaskAttemptState.SUCCEEDED);
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId,
+ TaskEventType.T_ATTEMPT_SUCCEEDED));
+ taskAttempt.eventHandler.handle
+ (new SpeculatorEvent
+ (taskAttempt.reportedStatus, taskAttempt.clock.getTime()));
+ }
+ }
+
+ private static class FailedTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
+ // set the finish time
+ taskAttempt.setFinishTime();
+
+ if (taskAttempt.getLaunchTime() != 0) {
+ taskAttempt.eventHandler
+ .handle(createJobCounterUpdateEventTAFailed(taskAttempt));
+ TaskAttemptUnsuccessfulCompletionEvent tauce =
+ createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
+ TaskAttemptState.FAILED);
+ taskAttempt.eventHandler.handle(new JobHistoryEvent(
+ taskAttempt.attemptId.getTaskId().getJobId(), tauce));
+ // taskAttempt.logAttemptFinishedEvent(TaskAttemptState.FAILED); Not
+ // handling failed map/reduce events.
+      } else {
+        LOG.debug("Not generating HistoryFinish event since the start event was"
+            + " not generated for taskAttempt: " + taskAttempt.getID());
+ }
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
+ }
+ }
+
+ private void logAttemptFinishedEvent(TaskAttemptState state) {
+ //Log finished events only if an attempt started.
+ if (getLaunchTime() == 0) return;
+ if (attemptId.getTaskId().getTaskType() == TaskType.MAP) {
+ MapAttemptFinishedEvent mfe =
+ new MapAttemptFinishedEvent(TypeConverter.fromYarn(attemptId),
+ TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
+ state.toString(),
+ this.reportedStatus.mapFinishTime,
+ finishTime, this.nodeHostName == null ? "UNKNOWN" : this.nodeHostName,
+ this.reportedStatus.stateString,
+ TypeConverter.fromYarn(getCounters()),
+ getProgressSplitBlock().burst());
+ eventHandler.handle(
+ new JobHistoryEvent(attemptId.getTaskId().getJobId(), mfe));
+ } else {
+ ReduceAttemptFinishedEvent rfe =
+ new ReduceAttemptFinishedEvent(TypeConverter.fromYarn(attemptId),
+ TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
+ state.toString(),
+ this.reportedStatus.shuffleFinishTime,
+ this.reportedStatus.sortFinishTime,
+ finishTime, this.containerMgrAddress == null ? "UNKNOWN" : this.containerMgrAddress,
+ this.reportedStatus.stateString,
+ TypeConverter.fromYarn(getCounters()),
+ getProgressSplitBlock().burst());
+ eventHandler.handle(
+ new JobHistoryEvent(attemptId.getTaskId().getJobId(), rfe));
+ }
+ }
+
+ private static class TooManyFetchFailureTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
+ //add to diagnostic
+ taskAttempt.addDiagnosticInfo("Too Many fetch failures.Failing the attempt");
+ //set the finish time
+ taskAttempt.setFinishTime();
+
+ if (taskAttempt.getLaunchTime() != 0) {
+ taskAttempt.eventHandler
+ .handle(createJobCounterUpdateEventTAFailed(taskAttempt));
+ TaskAttemptUnsuccessfulCompletionEvent tauce =
+ createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
+ TaskAttemptState.FAILED);
+ taskAttempt.eventHandler.handle(new JobHistoryEvent(
+ taskAttempt.attemptId.getTaskId().getJobId(), tauce));
+      } else {
+        LOG.debug("Not generating HistoryFinish event since the start event was"
+            + " not generated for taskAttempt: " + taskAttempt.getID());
+ }
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
+ }
+ }
+
+ private static class KilledTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ //set the finish time
+ taskAttempt.setFinishTime();
+ if (taskAttempt.getLaunchTime() != 0) {
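+        // Note: killed attempts that had launched reuse the TAFailed
+        // counter-update path below, so they increment the NUM_FAILED_*
+        // counters as well.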
+ taskAttempt.eventHandler
+ .handle(createJobCounterUpdateEventTAFailed(taskAttempt));
+ TaskAttemptUnsuccessfulCompletionEvent tauce =
+ createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
+ TaskAttemptState.KILLED);
+ taskAttempt.eventHandler.handle(new JobHistoryEvent(
+ taskAttempt.attemptId.getTaskId().getJobId(), tauce));
+      } else {
+        LOG.debug("Not generating HistoryFinish event since the start event was"
+            + " not generated for taskAttempt: " + taskAttempt.getID());
+ }
+// taskAttempt.logAttemptFinishedEvent(TaskAttemptState.KILLED); Not logging Map/Reduce attempts in case of failure.
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId,
+ TaskEventType.T_ATTEMPT_KILLED));
+ }
+ }
+
+ private static class CleanupContainerTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+      // unregister it from TaskAttemptListener so that it stops listening
+      // for it
+ taskAttempt.taskAttemptListener.unregister(
+ taskAttempt.attemptId, taskAttempt.jvmID);
+ //send the cleanup event to containerLauncher
+ taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
+ taskAttempt.attemptId,
+ taskAttempt.containerID, taskAttempt.containerMgrAddress,
+ taskAttempt.containerToken,
+ ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP));
+ }
+ }
+
+ private void addDiagnosticInfo(String diag) {
+ if (diag != null && !diag.equals("")) {
+ diagnostics.add(diag);
+ }
+ }
+
+ private static class StatusUpdater
+ implements SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ // Status update calls don't really change the state of the attempt.
+ TaskAttemptStatus newReportedStatus =
+ ((TaskAttemptStatusUpdateEvent) event)
+ .getReportedTaskAttemptStatus();
+ // Now switch the information in the reportedStatus
+ taskAttempt.reportedStatus = newReportedStatus;
+ taskAttempt.reportedStatus.taskState = taskAttempt.getState();
+
+ // send event to speculator about the reported status
+ taskAttempt.eventHandler.handle
+ (new SpeculatorEvent
+ (taskAttempt.reportedStatus, taskAttempt.clock.getTime()));
+
+ //add to diagnostic
+ taskAttempt.addDiagnosticInfo(newReportedStatus.diagnosticInfo);
+ taskAttempt.updateProgressSplits();
+
+      //if fetch failures are present, send the fetch failure event to the job
+      //this will only happen for reduce attempts
+ if (taskAttempt.reportedStatus.fetchFailedMaps != null &&
+ taskAttempt.reportedStatus.fetchFailedMaps.size() > 0) {
+ taskAttempt.eventHandler.handle(new JobTaskAttemptFetchFailureEvent(
+ taskAttempt.attemptId, taskAttempt.reportedStatus.fetchFailedMaps));
+ }
+ }
+ }
+
+ private static class DiagnosticInformationUpdater
+ implements SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ TaskAttemptDiagnosticsUpdateEvent diagEvent =
+ (TaskAttemptDiagnosticsUpdateEvent) event;
+ LOG.info("Diagnostics report from " + taskAttempt.attemptId + ": "
+ + diagEvent.getDiagnosticInfo());
+ taskAttempt.addDiagnosticInfo(diagEvent.getDiagnosticInfo());
+ }
+ }
+
+ private void initTaskAttemptStatus(TaskAttemptStatus result) {
+ result.progress = 0.0f;
+ result.diagnosticInfo = "";
+ result.phase = Phase.STARTING;
+ result.stateString = "NEW";
+ result.taskState = TaskAttemptState.NEW;
+ Counters counters = recordFactory.newRecordInstance(Counters.class);
+// counters.groups = new HashMap<String, CounterGroup>();
+ result.counters = counters;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
new file mode 100644
index 0000000..f1e4c80
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
@@ -0,0 +1,887 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.impl;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobMapTaskRescheduledEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+
+/**
+ * Implementation of Task interface.
+ */
+public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
+
+ private static final Log LOG = LogFactory.getLog(TaskImpl.class);
+
+ protected final Configuration conf;
+ protected final Path jobFile;
+ protected final OutputCommitter committer;
+ protected final int partition;
+ protected final TaskAttemptListener taskAttemptListener;
+ protected final EventHandler eventHandler;
+ private final TaskId taskId;
+ private Map<TaskAttemptId, TaskAttempt> attempts;
+ private final int maxAttempts;
+ protected final Clock clock;
+ private final Lock readLock;
+ private final Lock writeLock;
+ private final MRAppMetrics metrics;
+ private long scheduledTime;
+
+ private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ protected Collection<Token<? extends TokenIdentifier>> fsTokens;
+ protected Token<JobTokenIdentifier> jobToken;
+
+ // counts the number of attempts that are either running or in a state where
+ // they will come to be running when they get a Container
+ private int numberUncompletedAttempts = 0;
+
+ private boolean historyTaskStartGenerated = false;
+
+ private static final SingleArcTransition<TaskImpl, TaskEvent>
+ ATTEMPT_KILLED_TRANSITION = new AttemptKilledTransition();
+ private static final SingleArcTransition<TaskImpl, TaskEvent>
+ KILL_TRANSITION = new KillTransition();
+
+ private static final StateMachineFactory
+ <TaskImpl, TaskState, TaskEventType, TaskEvent>
+ stateMachineFactory
+ = new StateMachineFactory<TaskImpl, TaskState, TaskEventType, TaskEvent>
+ (TaskState.NEW)
+
+ // define the state machine of Task
+
+ // Transitions from NEW state
+ .addTransition(TaskState.NEW, TaskState.SCHEDULED,
+ TaskEventType.T_SCHEDULE, new InitialScheduleTransition())
+ .addTransition(TaskState.NEW, TaskState.KILLED,
+ TaskEventType.T_KILL, new KillNewTransition())
+
+ // Transitions from SCHEDULED state
+ //when the first attempt is launched, the task state is set to RUNNING
+ .addTransition(TaskState.SCHEDULED, TaskState.RUNNING,
+ TaskEventType.T_ATTEMPT_LAUNCHED, new LaunchTransition())
+ .addTransition(TaskState.SCHEDULED, TaskState.KILL_WAIT,
+ TaskEventType.T_KILL, KILL_TRANSITION)
+ .addTransition(TaskState.SCHEDULED, TaskState.SCHEDULED,
+ TaskEventType.T_ATTEMPT_KILLED, ATTEMPT_KILLED_TRANSITION)
+ .addTransition(TaskState.SCHEDULED,
+ EnumSet.of(TaskState.SCHEDULED, TaskState.FAILED),
+ TaskEventType.T_ATTEMPT_FAILED,
+ new AttemptFailedTransition())
+
+ // Transitions from RUNNING state
+ .addTransition(TaskState.RUNNING, TaskState.RUNNING,
+ TaskEventType.T_ATTEMPT_LAUNCHED) //more attempts may start later
+ .addTransition(TaskState.RUNNING, TaskState.RUNNING,
+ TaskEventType.T_ATTEMPT_COMMIT_PENDING,
+ new AttemptCommitPendingTransition())
+ .addTransition(TaskState.RUNNING, TaskState.RUNNING,
+ TaskEventType.T_ADD_SPEC_ATTEMPT, new RedundantScheduleTransition())
+ .addTransition(TaskState.RUNNING, TaskState.SUCCEEDED,
+ TaskEventType.T_ATTEMPT_SUCCEEDED,
+ new AttemptSucceededTransition())
+ .addTransition(TaskState.RUNNING, TaskState.RUNNING,
+ TaskEventType.T_ATTEMPT_KILLED,
+ ATTEMPT_KILLED_TRANSITION)
+ .addTransition(TaskState.RUNNING,
+ EnumSet.of(TaskState.RUNNING, TaskState.FAILED),
+ TaskEventType.T_ATTEMPT_FAILED,
+ new AttemptFailedTransition())
+ .addTransition(TaskState.RUNNING, TaskState.KILL_WAIT,
+ TaskEventType.T_KILL, KILL_TRANSITION)
+
+ // Transitions from KILL_WAIT state
+ .addTransition(TaskState.KILL_WAIT,
+ EnumSet.of(TaskState.KILL_WAIT, TaskState.KILLED),
+ TaskEventType.T_ATTEMPT_KILLED,
+ new KillWaitAttemptKilledTransition())
+ // Ignore-able transitions.
+ .addTransition(
+ TaskState.KILL_WAIT,
+ TaskState.KILL_WAIT,
+ EnumSet.of(TaskEventType.T_KILL,
+ TaskEventType.T_ATTEMPT_LAUNCHED,
+ TaskEventType.T_ATTEMPT_COMMIT_PENDING,
+ TaskEventType.T_ATTEMPT_FAILED,
+ TaskEventType.T_ATTEMPT_SUCCEEDED,
+ TaskEventType.T_ADD_SPEC_ATTEMPT))
+
+ // Transitions from SUCCEEDED state
+ .addTransition(TaskState.SUCCEEDED, //only possible for map tasks
+ EnumSet.of(TaskState.SCHEDULED, TaskState.FAILED),
+ TaskEventType.T_ATTEMPT_FAILED, new MapRetroactiveFailureTransition())
+ // Ignore-able transitions.
+ .addTransition(
+ TaskState.SUCCEEDED, TaskState.SUCCEEDED,
+ EnumSet.of(TaskEventType.T_KILL,
+ TaskEventType.T_ADD_SPEC_ATTEMPT,
+ TaskEventType.T_ATTEMPT_LAUNCHED,
+ TaskEventType.T_ATTEMPT_KILLED))
+
+ // Transitions from FAILED state
+ .addTransition(TaskState.FAILED, TaskState.FAILED,
+ EnumSet.of(TaskEventType.T_KILL,
+ TaskEventType.T_ADD_SPEC_ATTEMPT))
+
+ // Transitions from KILLED state
+ .addTransition(TaskState.KILLED, TaskState.KILLED,
+ EnumSet.of(TaskEventType.T_KILL,
+ TaskEventType.T_ADD_SPEC_ATTEMPT))
+
+ // create the topology tables
+ .installTopology();
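+  // Summary of the happy path encoded above: NEW -> SCHEDULED (T_SCHEDULE)
+  // -> RUNNING (first attempt launched) -> SUCCEEDED (an attempt succeeds).
+  // Kills detour through KILL_WAIT until outstanding attempts report back.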
+
+ private final StateMachine<TaskState, TaskEventType, TaskEvent>
+ stateMachine;
+
+ protected int nextAttemptNumber;
+
+  //should be set to the attempt that first reports COMMIT_PENDING
+ private TaskAttemptId commitAttempt;
+
+ private TaskAttemptId successfulAttempt;
+
+ private int failedAttempts;
+  private int finishedAttempts;//total of succeeded, failed and killed attempts
+
+ @Override
+ public TaskState getState() {
+ return stateMachine.getCurrentState();
+ }
+
+ public TaskImpl(JobId jobId, TaskType taskType, int partition,
+ EventHandler eventHandler, Path remoteJobConfFile, Configuration conf,
+ TaskAttemptListener taskAttemptListener, OutputCommitter committer,
+ Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock,
+ Set<TaskId> completedTasksFromPreviousRun, int startCount,
+ MRAppMetrics metrics) {
+ this.conf = conf;
+ this.clock = clock;
+ this.jobFile = remoteJobConfFile;
+ ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+ readLock = readWriteLock.readLock();
+ writeLock = readWriteLock.writeLock();
+ this.attempts = Collections.emptyMap();
+ // This overridable method call is okay in a constructor because we
+ // have a convention that none of the overrides depends on any
+ // fields that need initialization.
+ maxAttempts = getMaxAttempts();
+ taskId = recordFactory.newRecordInstance(TaskId.class);
+ taskId.setJobId(jobId);
+ taskId.setId(partition);
+ taskId.setTaskType(taskType);
+ this.partition = partition;
+ this.taskAttemptListener = taskAttemptListener;
+ this.eventHandler = eventHandler;
+ this.committer = committer;
+ this.fsTokens = fsTokens;
+ this.jobToken = jobToken;
+ this.metrics = metrics;
+
+ if (completedTasksFromPreviousRun != null
+ && completedTasksFromPreviousRun.contains(taskId)) {
+ LOG.info("Task is from previous run " + taskId);
+ startCount = startCount - 1;
+ }
+
+    //attempt ids are generated based on MR app startCount so that attempts
+    //from previous lives don't clash with those from the current one.
+    //this assumes that a task won't have more than 1000 attempts in any
+    //single life
+ nextAttemptNumber = (startCount - 1) * 1000;
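+    // Illustrative: on the second AM run (startCount == 2) attempts number
+    // from 1000, so they cannot collide with attempts 0-999 from the first
+    // run.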
+
+ // This "this leak" is okay because the retained pointer is in an
+ // instance variable.
+ stateMachine = stateMachineFactory.make(this);
+ }
+
+ @Override
+ public Map<TaskAttemptId, TaskAttempt> getAttempts() {
+ readLock.lock();
+
+ try {
+ if (attempts.size() <= 1) {
+ return attempts;
+ }
+
+ Map<TaskAttemptId, TaskAttempt> result
+ = new LinkedHashMap<TaskAttemptId, TaskAttempt>();
+ result.putAll(attempts);
+
+ return result;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public TaskAttempt getAttempt(TaskAttemptId attemptID) {
+ readLock.lock();
+ try {
+ return attempts.get(attemptID);
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public TaskId getID() {
+ return taskId;
+ }
+
+ @Override
+ public boolean isFinished() {
+ readLock.lock();
+ try {
+ // TODO: Use stateMachine level method?
+ return (getState() == TaskState.SUCCEEDED ||
+ getState() == TaskState.FAILED ||
+ getState() == TaskState.KILLED);
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public TaskReport getReport() {
+ TaskReport report = recordFactory.newRecordInstance(TaskReport.class);
+ readLock.lock();
+ try {
+ report.setTaskId(taskId);
+ report.setStartTime(getLaunchTime());
+ report.setFinishTime(getFinishTime());
+ report.setTaskState(getState());
+ report.setProgress(getProgress());
+ report.setCounters(getCounters());
+
+ for (TaskAttempt attempt : attempts.values()) {
+ if (TaskAttemptState.RUNNING.equals(attempt.getState())) {
+ report.addRunningAttempt(attempt.getID());
+ }
+ }
+
+ report.setSuccessfulAttempt(successfulAttempt);
+
+ for (TaskAttempt att : attempts.values()) {
+ String prefix = "AttemptID:" + att.getID() + " Info:";
+ for (CharSequence cs : att.getDiagnostics()) {
+ report.addDiagnostics(prefix + cs);
+
+ }
+ }
+ return report;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public Counters getCounters() {
+ Counters counters = null;
+ readLock.lock();
+ try {
+ TaskAttempt bestAttempt = selectBestAttempt();
+ if (bestAttempt != null) {
+ counters = bestAttempt.getCounters();
+ } else {
+ counters = recordFactory.newRecordInstance(Counters.class);
+ }
+ return counters;
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ @Override
+ public float getProgress() {
+ readLock.lock();
+ try {
+ TaskAttempt bestAttempt = selectBestAttempt();
+ if (bestAttempt == null) {
+ return 0;
+ }
+ return bestAttempt.getProgress();
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ // always called while holding the read or write lock
+ private long getLaunchTime() {
+ long launchTime = 0;
+ for (TaskAttempt at : attempts.values()) {
+ //select the least launch time of all attempts
+ if (launchTime == 0 || launchTime > at.getLaunchTime()) {
+ launchTime = at.getLaunchTime();
+ }
+ }
+ if (launchTime == 0) {
+ return this.scheduledTime;
+ }
+ return launchTime;
+ }
+
+ // always called while holding the read or write lock
+ //TODO Verify behaviour if Task is killed (no finished attempt)
+ private long getFinishTime() {
+ if (!isFinished()) {
+ return 0;
+ }
+ long finishTime = 0;
+ for (TaskAttempt at : attempts.values()) {
+ //select the max finish time of all attempts
+ if (finishTime < at.getFinishTime()) {
+ finishTime = at.getFinishTime();
+ }
+ }
+ return finishTime;
+ }
+
+ private long getFinishTime(TaskAttemptId taId) {
+ if (taId == null) {
+ return clock.getTime();
+ }
+ for (TaskAttempt at : attempts.values()) {
+ //return the finish time of the matching attempt
+ if (at.getID().equals(taId)) {
+ return at.getFinishTime();
+ }
+ }
+ return 0;
+ }
+
+ private TaskState finished(TaskState finalState) {
+ if (getState() == TaskState.RUNNING) {
+ metrics.endRunningTask(this);
+ }
+ return finalState;
+ }
+
+ //select the attempt with the best progress
+ // always called inside the Read Lock
+ private TaskAttempt selectBestAttempt() {
+ float progress = 0f;
+ TaskAttempt result = null;
+ for (TaskAttempt at : attempts.values()) {
+ if (result == null) {
+ result = at; //The first time around
+ }
+ //TODO: consider the attempt only if it is not failed/killed ?
+ // calculate the best progress
+ if (at.getProgress() > progress) {
+ result = at;
+ progress = at.getProgress();
+ }
+ }
+ return result;
+ }
+
+ @Override
+ public boolean canCommit(TaskAttemptId taskAttemptID) {
+ readLock.lock();
+ boolean canCommit = false;
+ try {
+ if (commitAttempt != null) {
+ canCommit = taskAttemptID.equals(commitAttempt);
+ LOG.info("Result of canCommit for " + taskAttemptID + ":" + canCommit);
+ }
+ } finally {
+ readLock.unlock();
+ }
+ return canCommit;
+ }
+
+ protected abstract TaskAttemptImpl createAttempt();
+
+ // No override of this method may require that the subclass be initialized.
+ protected abstract int getMaxAttempts();
+
+ protected TaskAttempt getSuccessfulAttempt() {
+ readLock.lock();
+ try {
+ if (null == successfulAttempt) {
+ return null;
+ }
+ return attempts.get(successfulAttempt);
+ } finally {
+ readLock.unlock();
+ }
+ }
+
+ // This is always called in the Write Lock
+ private void addAndScheduleAttempt() {
+ TaskAttempt attempt = createAttempt();
+ LOG.info("Created attempt " + attempt.getID());
+ switch (attempts.size()) {
+ case 0:
+ attempts = Collections.singletonMap(attempt.getID(), attempt);
+ break;
+
+ case 1:
+ Map<TaskAttemptId, TaskAttempt> newAttempts
+ = new LinkedHashMap<TaskAttemptId, TaskAttempt>(maxAttempts);
+ newAttempts.putAll(attempts);
+ attempts = newAttempts;
+ attempts.put(attempt.getID(), attempt);
+ break;
+
+ default:
+ attempts.put(attempt.getID(), attempt);
+ break;
+ }
+ ++nextAttemptNumber;
+ ++numberUncompletedAttempts;
+ //schedule the attempt
+ if (failedAttempts > 0) {
+ eventHandler.handle(new TaskAttemptEvent(attempt.getID(),
+ TaskAttemptEventType.TA_RESCHEDULE));
+ } else {
+ eventHandler.handle(new TaskAttemptEvent(attempt.getID(),
+ TaskAttemptEventType.TA_SCHEDULE));
+ }
+ }
+
+ @Override
+ public void handle(TaskEvent event) {
+ LOG.info("Processing " + event.getTaskID() + " of type " + event.getType());
+ writeLock.lock();
+ try {
+ TaskState oldState = getState();
+ try {
+ stateMachine.doTransition(event.getType(), event);
+ } catch (InvalidStateTransitonException e) {
+ LOG.error("Can't handle this event at current state", e);
+ internalError(event.getType());
+ }
+ if (oldState != getState()) {
+ LOG.info(taskId + " Task Transitioned from " + oldState + " to "
+ + getState());
+ }
+
+ } finally {
+ writeLock.unlock();
+ }
+ }
+
+ private void internalError(TaskEventType type) {
+ eventHandler.handle(new JobDiagnosticsUpdateEvent(
+ this.taskId.getJobId(), "Invalid event " + type +
+ " on Task " + this.taskId));
+ eventHandler.handle(new JobEvent(this.taskId.getJobId(),
+ JobEventType.INTERNAL_ERROR));
+ }
+
+ // always called inside a transition, in turn inside the Write Lock
+ private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
+ TaskAttemptCompletionEventStatus status) {
+ finishedAttempts++;
+ TaskAttempt attempt = attempts.get(attemptId);
+ //raise the completion event only if a container was actually assigned
+ //to the attempt
+ if (attempt.getNodeHttpAddress() != null) {
+ TaskAttemptCompletionEvent tce = recordFactory.newRecordInstance(TaskAttemptCompletionEvent.class);
+ tce.setEventId(-1);
+ //TODO: XXXXXX hardcoded port
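+ // (only the host part of the attempt's HTTP address is reused; the
+ // shuffle port is assumed to be 8080 on every node for now)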
+ tce.setMapOutputServerAddress("http://" + attempt.getNodeHttpAddress().split(":")[0] + ":8080");
+ tce.setStatus(status);
+ tce.setAttemptId(attempt.getID());
+ int runTime = 0;
+ if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() !=0)
+ runTime = (int)(attempt.getFinishTime() - attempt.getLaunchTime());
+ tce.setAttemptRunTime(runTime);
+
+ //raise the event to job so that it adds the completion event to its
+ //data structures
+ eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
+ }
+ }
+
+ private static TaskFinishedEvent createTaskFinishedEvent(TaskImpl task, TaskState taskState) {
+ TaskFinishedEvent tfe =
+ new TaskFinishedEvent(TypeConverter.fromYarn(task.taskId),
+ task.getFinishTime(task.successfulAttempt),
+ TypeConverter.fromYarn(task.taskId.getTaskType()),
+ taskState.toString(),
+ TypeConverter.fromYarn(task.getCounters()));
+ return tfe;
+ }
+
+ private static TaskFailedEvent createTaskFailedEvent(TaskImpl task, List<String> diag, TaskState taskState, TaskAttemptId taId) {
+ StringBuilder errorSb = new StringBuilder();
+ if (diag != null) {
+ for (String d : diag) {
+ errorSb.append(", ").append(d);
+ }
+ }
+ TaskFailedEvent taskFailedEvent = new TaskFailedEvent(
+ TypeConverter.fromYarn(task.taskId),
+ // Hack: getFinishTime needs isFinished to be true, and that doesn't
+ // happen till after the transition.
+ task.getFinishTime(taId),
+ TypeConverter.fromYarn(task.getType()),
+ errorSb.toString(),
+ taskState.toString(),
+ taId == null ? null : TypeConverter.fromYarn(taId));
+ return taskFailedEvent;
+ }
+
+ /**
+ * Subclasses can override this method to provide their own representations
+ * of splits (if any).
+ *
+ * @return a String representation of the splits.
+ */
+ protected String getSplitsAsString() {
+ return "";
+ }
+
+ private static class InitialScheduleTransition
+ implements SingleArcTransition<TaskImpl, TaskEvent> {
+
+ @Override
+ public void transition(TaskImpl task, TaskEvent event) {
+ task.addAndScheduleAttempt();
+ task.scheduledTime = task.clock.getTime();
+ TaskStartedEvent tse = new TaskStartedEvent(
+ TypeConverter.fromYarn(task.taskId), task.getLaunchTime(),
+ TypeConverter.fromYarn(task.taskId.getTaskType()),
+ task.getSplitsAsString());
+ task.eventHandler
+ .handle(new JobHistoryEvent(task.taskId.getJobId(), tse));
+ task.historyTaskStartGenerated = true;
+ }
+ }
+
+ // Used when creating a new attempt while one is already running.
+ // Currently we do this for speculation. In the future we may do this
+ // for tasks that failed in a way that might indicate application code
+ // problems, so we can process later failures in parallel and fail the
+ // job quickly when that happens.
+ private static class RedundantScheduleTransition
+ implements SingleArcTransition<TaskImpl, TaskEvent> {
+
+ @Override
+ public void transition(TaskImpl task, TaskEvent event) {
+ LOG.info("Scheduling a redundant attempt for task " + task.taskId);
+ task.addAndScheduleAttempt();
+ }
+ }
+
+ private static class AttemptCommitPendingTransition
+ implements SingleArcTransition<TaskImpl, TaskEvent> {
+ @Override
+ public void transition(TaskImpl task, TaskEvent event) {
+ TaskTAttemptEvent ev = (TaskTAttemptEvent) event;
+ // The attempt is commit-pending; decide whether to set it as the commitAttempt
+ TaskAttemptId attemptID = ev.getTaskAttemptID();
+ if (task.commitAttempt == null) {
+ // TODO: validate attemptID
+ task.commitAttempt = attemptID;
+ LOG.info(attemptID + " given a go for committing the task output.");
+ } else {
+ // Don't think this can be a pluggable decision, so simply raise an
+ // event for the TaskAttempt to delete its output.
+ LOG.info(task.commitAttempt
+ + " already given a go for committing the task output, so killing "
+ + attemptID);
+ task.eventHandler.handle(new TaskAttemptEvent(
+ attemptID, TaskAttemptEventType.TA_KILL));
+ }
+ }
+ }
+
+ private static class AttemptSucceededTransition
+ implements SingleArcTransition<TaskImpl, TaskEvent> {
+ @Override
+ public void transition(TaskImpl task, TaskEvent event) {
+ task.handleTaskAttemptCompletion(
+ ((TaskTAttemptEvent) event).getTaskAttemptID(),
+ TaskAttemptCompletionEventStatus.SUCCEEDED);
+ --task.numberUncompletedAttempts;
+ task.successfulAttempt = ((TaskTAttemptEvent) event).getTaskAttemptID();
+ task.eventHandler.handle(new JobTaskEvent(
+ task.taskId, TaskState.SUCCEEDED));
+ LOG.info("Task succeeded with attempt " + task.successfulAttempt);
+ // issue kill to all other attempts
+ if (task.historyTaskStartGenerated) {
+ TaskFinishedEvent tfe = createTaskFinishedEvent(task,
+ TaskState.SUCCEEDED);
+ task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
+ tfe));
+ }
+ for (TaskAttempt attempt : task.attempts.values()) {
+ if (!attempt.getID().equals(task.successfulAttempt) &&
+ // This is okay because it can only talk us out of sending a
+ // TA_KILL message to an attempt that doesn't need one for
+ // other reasons.
+ !attempt.isFinished()) {
+ LOG.info("Issuing kill to other attempt " + attempt.getID());
+ task.eventHandler.handle(
+ new TaskAttemptEvent(attempt.getID(),
+ TaskAttemptEventType.TA_KILL));
+ }
+ }
+ task.finished(TaskState.SUCCEEDED);
+ }
+ }
+
+ private static class AttemptKilledTransition implements
+ SingleArcTransition<TaskImpl, TaskEvent> {
+ @Override
+ public void transition(TaskImpl task, TaskEvent event) {
+ task.handleTaskAttemptCompletion(
+ ((TaskTAttemptEvent) event).getTaskAttemptID(),
+ TaskAttemptCompletionEventStatus.KILLED);
+ --task.numberUncompletedAttempts;
+ if (task.successfulAttempt == null) {
+ task.addAndScheduleAttempt();
+ }
+ }
+ }
+
+
+ private static class KillWaitAttemptKilledTransition implements
+ MultipleArcTransition<TaskImpl, TaskEvent, TaskState> {
+
+ protected TaskState finalState = TaskState.KILLED;
+
+ @Override
+ public TaskState transition(TaskImpl task, TaskEvent event) {
+ task.handleTaskAttemptCompletion(
+ ((TaskTAttemptEvent) event).getTaskAttemptID(),
+ TaskAttemptCompletionEventStatus.KILLED);
+ // check whether all attempts are finished
+ if (task.finishedAttempts == task.attempts.size()) {
+ if (task.historyTaskStartGenerated) {
+ TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
+ finalState, null); // TODO JH verify failedAttempt null
+ task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
+ taskFailedEvent));
+ } else {
+ LOG.debug("Not generating HistoryFinish event since start event not" +
+ " generated for task: " + task.getID());
+ }
+
+ task.eventHandler.handle(
+ new JobTaskEvent(task.taskId, finalState));
+ return finalState;
+ }
+ return task.getState();
+ }
+ }
+
+ private static class AttemptFailedTransition implements
+ MultipleArcTransition<TaskImpl, TaskEvent, TaskState> {
+
+ @Override
+ public TaskState transition(TaskImpl task, TaskEvent event) {
+ task.failedAttempts++;
+ TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
+ TaskAttempt attempt = task.attempts.get(castEvent.getTaskAttemptID());
+ if (attempt.getAssignedContainerMgrAddress() != null) {
+ //container was assigned
+ task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(),
+ attempt.getAssignedContainerMgrAddress()));
+ }
+
+ if (task.failedAttempts < task.maxAttempts) {
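+ // still below the per-task attempt limit: record the failure and, if no
+ // other attempt is in flight, schedule a replacement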
+ task.handleTaskAttemptCompletion(
+ ((TaskTAttemptEvent) event).getTaskAttemptID(),
+ TaskAttemptCompletionEventStatus.FAILED);
+ // we don't need a new event if we already have a spare
+ if (--task.numberUncompletedAttempts == 0
+ && task.successfulAttempt == null) {
+ task.addAndScheduleAttempt();
+ }
+ } else {
+ task.handleTaskAttemptCompletion(
+ ((TaskTAttemptEvent) event).getTaskAttemptID(),
+ TaskAttemptCompletionEventStatus.TIPFAILED);
+ TaskAttemptId taId = castEvent.getTaskAttemptID();
+
+ if (task.historyTaskStartGenerated) {
+ TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, attempt.getDiagnostics(),
+ TaskState.FAILED, taId);
+ task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
+ taskFailedEvent));
+ } else {
+ LOG.debug("Not generating HistoryFinish event since start event not" +
+ " generated for task: " + task.getID());
+ }
+ task.eventHandler.handle(
+ new JobTaskEvent(task.taskId, TaskState.FAILED));
+ return task.finished(TaskState.FAILED);
+ }
+ return getDefaultState(task);
+ }
+
+ protected TaskState getDefaultState(Task task) {
+ return task.getState();
+ }
+
+ protected void unSucceed(TaskImpl task) {
+ ++task.numberUncompletedAttempts;
+ task.successfulAttempt = null;
+ }
+ }
+
+ private static class MapRetroactiveFailureTransition
+ extends AttemptFailedTransition {
+
+ @Override
+ public TaskState transition(TaskImpl task, TaskEvent event) {
+ //verify that this occurs only for map task
+ //TODO: consider moving it to MapTaskImpl
+ if (!TaskType.MAP.equals(task.getType())) {
+ LOG.error("Unexpected event for REDUCE task " + event.getType());
+ task.internalError(event.getType());
+ }
+
+ // tell the job about the rescheduling
+ task.eventHandler.handle(
+ new JobMapTaskRescheduledEvent(task.taskId));
+ // super.transition is mostly coded for the case where an
+ // UNcompleted task failed. When a COMPLETED task retroactively
+ // fails, we have to let AttemptFailedTransition.transition
+ // believe that there's no redundancy.
+ unSucceed(task);
+ return super.transition(task, event);
+ }
+
+ @Override
+ protected TaskState getDefaultState(Task task) {
+ return TaskState.SCHEDULED;
+ }
+ }
+
+ private static class KillNewTransition
+ implements SingleArcTransition<TaskImpl, TaskEvent> {
+ @Override
+ public void transition(TaskImpl task, TaskEvent event) {
+
+ if (task.historyTaskStartGenerated) {
+ TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
+ TaskState.KILLED, null); // TODO Verify failedAttemptId is null
+ task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
+ taskFailedEvent));
+ }else {
+ LOG.debug("Not generating HistoryFinish event since start event not" +
+ " generated for task: " + task.getID());
+ }
+
+ task.eventHandler.handle(
+ new JobTaskEvent(task.taskId, TaskState.KILLED));
+ task.metrics.endWaitingTask(task);
+ }
+ }
+
+ private void killUnfinishedAttempt(TaskAttempt attempt, String logMsg) {
+ if (attempt != null && !attempt.isFinished()) {
+ LOG.info(logMsg + " " + attempt.getID());
+ eventHandler.handle(
+ new TaskAttemptEvent(attempt.getID(),
+ TaskAttemptEventType.TA_KILL));
+ }
+ }
+
+ private static class KillTransition
+ implements SingleArcTransition<TaskImpl, TaskEvent> {
+ @Override
+ public void transition(TaskImpl task, TaskEvent event) {
+ // issue kill to all non finished attempts
+ for (TaskAttempt attempt : task.attempts.values()) {
+ task.killUnfinishedAttempt
+ (attempt, "Task KILL is received. Killing attempt!");
+ }
+
+ task.numberUncompletedAttempts = 0;
+ }
+ }
+
+ static class LaunchTransition
+ implements SingleArcTransition<TaskImpl, TaskEvent> {
+ @Override
+ public void transition(TaskImpl task, TaskEvent event) {
+ task.metrics.launchedTask(task);
+ task.metrics.runningTask(task);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
new file mode 100644
index 0000000..cc41db1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
@@ -0,0 +1,31 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.launcher;
+
+
+import org.apache.hadoop.yarn.event.EventHandler;
+
+public interface ContainerLauncher
+ extends EventHandler<ContainerLauncherEvent> {
+
+ enum EventType {
+ CONTAINER_REMOTE_LAUNCH,
+ CONTAINER_REMOTE_CLEANUP
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherEvent.java
new file mode 100644
index 0000000..00bfc39
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherEvent.java
@@ -0,0 +1,114 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.launcher;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+public class ContainerLauncherEvent
+ extends AbstractEvent<ContainerLauncher.EventType> {
+
+ private TaskAttemptId taskAttemptID;
+ private ContainerId containerID;
+ private String containerMgrAddress;
+ private ContainerToken containerToken;
+
+ public ContainerLauncherEvent(TaskAttemptId taskAttemptID,
+ ContainerId containerID,
+ String containerMgrAddress,
+ ContainerToken containerToken,
+ ContainerLauncher.EventType type) {
+ super(type);
+ this.taskAttemptID = taskAttemptID;
+ this.containerID = containerID;
+ this.containerMgrAddress = containerMgrAddress;
+ this.containerToken = containerToken;
+ }
+
+ public TaskAttemptId getTaskAttemptID() {
+ return this.taskAttemptID;
+ }
+
+ public ContainerId getContainerID() {
+ return containerID;
+ }
+
+ public String getContainerMgrAddress() {
+ return containerMgrAddress;
+ }
+
+ public ContainerToken getContainerToken() {
+ return containerToken;
+ }
+
+ @Override
+ public String toString() {
+ return super.toString() + " for taskAttempt " + taskAttemptID;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result
+ + ((containerID == null) ? 0 : containerID.hashCode());
+ result = prime * result
+ + ((containerMgrAddress == null) ? 0 : containerMgrAddress.hashCode());
+ result = prime * result
+ + ((containerToken == null) ? 0 : containerToken.hashCode());
+ result = prime * result
+ + ((taskAttemptID == null) ? 0 : taskAttemptID.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ ContainerLauncherEvent other = (ContainerLauncherEvent) obj;
+ if (containerID == null) {
+ if (other.containerID != null)
+ return false;
+ } else if (!containerID.equals(other.containerID))
+ return false;
+ if (containerMgrAddress == null) {
+ if (other.containerMgrAddress != null)
+ return false;
+ } else if (!containerMgrAddress.equals(other.containerMgrAddress))
+ return false;
+ if (containerToken == null) {
+ if (other.containerToken != null)
+ return false;
+ } else if (!containerToken.equals(other.containerToken))
+ return false;
+ if (taskAttemptID == null) {
+ if (other.taskAttemptID != null)
+ return false;
+ } else if (!taskAttemptID.equals(other.taskAttemptID))
+ return false;
+ return true;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
new file mode 100644
index 0000000..6ac96f5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
@@ -0,0 +1,279 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.launcher;
+
+import java.io.IOException;
+import java.security.PrivilegedAction;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+/**
+ * This class is responsible for launching containers.
+ */
+public class ContainerLauncherImpl extends AbstractService implements
+ ContainerLauncher {
+
+ private static final Log LOG = LogFactory.getLog(ContainerLauncherImpl.class);
+
+ private AppContext context;
+ private ThreadPoolExecutor launcherPool;
+ private Thread eventHandlingThread;
+ private BlockingQueue<ContainerLauncherEvent> eventQueue =
+ new LinkedBlockingQueue<ContainerLauncherEvent>();
+ private RecordFactory recordFactory;
+ //have a cache/map of UGIs so as to avoid creating too many RPC
+ //client connection objects to the same NodeManager
+ private Map<String, UserGroupInformation> ugiMap =
+ new HashMap<String, UserGroupInformation>();
+
+ public ContainerLauncherImpl(AppContext context) {
+ super(ContainerLauncherImpl.class.getName());
+ this.context = context;
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+ // Clone configuration for this component so that the SecurityInfo setting
+ // doesn't affect the original configuration
+ Configuration myLocalConfig = new Configuration(conf);
+ myLocalConfig.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ ContainerManagerSecurityInfo.class, SecurityInfo.class);
+ this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
+ super.init(myLocalConfig);
+ }
+
+ public void start() {
+ launcherPool =
+ new ThreadPoolExecutor(getConfig().getInt(
+ AMConstants.CONTAINERLAUNCHER_THREADPOOL_SIZE, 10),
+ Integer.MAX_VALUE, 1, TimeUnit.HOURS,
+ new LinkedBlockingQueue<Runnable>());
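+ // Note: with an unbounded LinkedBlockingQueue the pool never grows past
+ // its core size, so the Integer.MAX_VALUE maximum is effectively unused
+ // and extra launches simply queue up behind the core threads.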
+ launcherPool.prestartAllCoreThreads(); // Wait for work.
+ eventHandlingThread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ ContainerLauncherEvent event = null;
+ while (!Thread.currentThread().isInterrupted()) {
+ try {
+ event = eventQueue.take();
+ } catch (InterruptedException e) {
+ LOG.error("Returning, interrupted : " + e);
+ return;
+ }
+ // the events from the queue are handled in parallel
+ // using a thread pool
+ launcherPool.execute(new EventProcessor(event));
+
+ // TODO: Group launching of multiple containers to a single
+ // NodeManager into a single connection
+ }
+ }
+ });
+ eventHandlingThread.start();
+ super.start();
+ }
+
+ public void stop() {
+ eventHandlingThread.interrupt();
+ launcherPool.shutdown();
+ super.stop();
+ }
+
+ protected ContainerManager getCMProxy(ContainerId containerID,
+ final String containerManagerBindAddr, ContainerToken containerToken)
+ throws IOException {
+
+ UserGroupInformation user = UserGroupInformation.getCurrentUser();
+
+ // TODO: Synchronization problems!!
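+ // (ugiMap is a plain HashMap shared by all pooled EventProcessor threads,
+ // so concurrent launches can race on this check-then-put)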
+ if (UserGroupInformation.isSecurityEnabled()) {
+ if(!ugiMap.containsKey(containerManagerBindAddr)) {
+ Token<ContainerTokenIdentifier> token =
+ new Token<ContainerTokenIdentifier>(
+ containerToken.getIdentifier().array(),
+ containerToken.getPassword().array(), new Text(
+ containerToken.getKind()), new Text(
+ containerToken.getService()));
+ //the user in createRemoteUser in this context is not important
+ user = UserGroupInformation.createRemoteUser(containerManagerBindAddr);
+ user.addToken(token);
+ ugiMap.put(containerManagerBindAddr, user);
+ } else {
+ user = ugiMap.get(containerManagerBindAddr);
+ }
+ }
+ ContainerManager proxy =
+ user.doAs(new PrivilegedAction<ContainerManager>() {
+ @Override
+ public ContainerManager run() {
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ return (ContainerManager) rpc.getProxy(ContainerManager.class,
+ NetUtils.createSocketAddr(containerManagerBindAddr),
+ getConfig());
+ }
+ });
+ return proxy;
+ }
+
+ /**
+ * Sets up and starts the container on the remote NodeManager.
+ */
+ private class EventProcessor implements Runnable {
+ private ContainerLauncherEvent event;
+
+ EventProcessor(ContainerLauncherEvent event) {
+ this.event = event;
+ }
+
+ @Override
+ public void run() {
+ LOG.info("Processing the event " + event.toString());
+
+ // Load ContainerManager tokens before creating a connection.
+ // TODO: Do it only once per NodeManager.
+ final String containerManagerBindAddr = event.getContainerMgrAddress();
+ ContainerId containerID = event.getContainerID();
+ ContainerToken containerToken = event.getContainerToken();
+
+ switch(event.getType()) {
+
+ case CONTAINER_REMOTE_LAUNCH:
+ ContainerRemoteLaunchEvent launchEv = (ContainerRemoteLaunchEvent) event;
+
+ TaskAttemptId taskAttemptID = launchEv.getTaskAttemptID();
+ try {
+
+ ContainerManager proxy =
+ getCMProxy(containerID, containerManagerBindAddr, containerToken);
+
+ // Construct the actual Container
+ ContainerLaunchContext containerLaunchContext =
+ launchEv.getContainer();
+
+ // Now launch the actual container
+ StartContainerRequest startRequest = recordFactory
+ .newRecordInstance(StartContainerRequest.class);
+ startRequest.setContainerLaunchContext(containerLaunchContext);
+ proxy.startContainer(startRequest);
+
+ LOG.info("Returning from container-launch for " + taskAttemptID);
+
+ // after launching, send launched event to task attempt to move
+ // it from ASSIGNED to RUNNING state
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(taskAttemptID,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED));
+ } catch (Throwable t) {
+ String message = "Container launch failed for " + containerID
+ + " : " + StringUtils.stringifyException(t);
+ LOG.error(message);
+ context.getEventHandler().handle(
+ new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message));
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(taskAttemptID,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
+ }
+
+ break;
+
+ case CONTAINER_REMOTE_CLEANUP:
+ // If a matching event is still sitting in eventQueue (i.e. not yet
+ // processed), remove it and just deallocate the container instead of
+ // contacting the NodeManager
+ if (eventQueue.contains(event)) {
+ eventQueue.remove(event); // TODO: Any synchro needed?
+ //deallocate the container
+ context.getEventHandler().handle(
+ new ContainerAllocatorEvent(event.getTaskAttemptID(),
+ ContainerAllocator.EventType.CONTAINER_DEALLOCATE));
+ } else {
+ try {
+ ContainerManager proxy =
+ getCMProxy(containerID, containerManagerBindAddr, containerToken);
+ // TODO:check whether container is launched
+
+ // kill the remote container if already launched
+ StopContainerRequest stopRequest = recordFactory
+ .newRecordInstance(StopContainerRequest.class);
+ stopRequest.setContainerId(event.getContainerID());
+ proxy.stopContainer(stopRequest);
+
+ } catch (Throwable t) {
+ //ignore the cleanup failure
+ LOG.warn("cleanup failed for container " + event.getContainerID() ,
+ t);
+ }
+
+ // after killing, send killed event to taskattempt
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(event.getTaskAttemptID(),
+ TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ }
+ break;
+ }
+ }
+
+ }
+
+ @Override
+ public void handle(ContainerLauncherEvent event) {
+ try {
+ eventQueue.put(event);
+ } catch (InterruptedException e) {
+ throw new YarnException(e);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerRemoteLaunchEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerRemoteLaunchEvent.java
new file mode 100644
index 0000000..59ab7f0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerRemoteLaunchEvent.java
@@ -0,0 +1,40 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.launcher;
+
+import org.apache.hadoop.mapred.Task;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+
+public abstract class ContainerRemoteLaunchEvent extends ContainerLauncherEvent {
+
+ public ContainerRemoteLaunchEvent(TaskAttemptId taskAttemptID,
+ ContainerId containerID, String containerMgrAddress,
+ ContainerToken containerToken) {
+ super(taskAttemptID, containerID, containerMgrAddress,
+ containerToken,
+ ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH);
+ }
+ public abstract ContainerLaunchContext getContainer();
+
+ public abstract Task getRemoteTask();
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
new file mode 100644
index 0000000..dfe9b8e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
@@ -0,0 +1,100 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.local;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Allocates containers locally. Doesn't allocate a real container;
+ * instead sends an allocated event for all requests.
+ */
+public class LocalContainerAllocator extends RMCommunicator
+ implements ContainerAllocator {
+
+ private static final Log LOG =
+ LogFactory.getLog(LocalContainerAllocator.class);
+
+ private final EventHandler eventHandler;
+ private final ApplicationId appID;
+ private AtomicInteger containerCount = new AtomicInteger();
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ public LocalContainerAllocator(ClientService clientService,
+ AppContext context) {
+ super(clientService, context);
+ this.eventHandler = context.getEventHandler();
+ this.appID = context.getApplicationID();
+ }
+
+ @Override
+ public void handle(ContainerAllocatorEvent event) {
+ if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
+ LOG.info("Processing the event " + event.toString());
+ ContainerId cID = recordFactory.newRecordInstance(ContainerId.class);
+ cID.setAppId(appID);
+ // use negative ids to denote that these are local. TODO: find a better way.
+ cID.setId((-1) * containerCount.getAndIncrement());
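+ // (containerCount starts at 0, so successive local ids are 0, -1, -2, ...)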
+
+ Container container = recordFactory.newRecordInstance(Container.class);
+ container.setId(cID);
+ NodeId nodeId = Records.newRecord(NodeId.class);
+ nodeId.setHost("localhost");
+ nodeId.setPort(1234);
+ container.setNodeId(nodeId);
+ container.setContainerToken(null);
+ container.setNodeHttpAddress("localhost:9999");
+ // send the container-assigned event to task attempt
+
+ if (event.getAttemptID().getTaskId().getTaskType() == TaskType.MAP) {
+ JobCounterUpdateEvent jce =
+ new JobCounterUpdateEvent(event.getAttemptID().getTaskId()
+ .getJobId());
+ // TODO Setting OTHER_LOCAL_MAP for now.
+ jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
+ eventHandler.handle(jce);
+ }
+ eventHandler.handle(new TaskAttemptContainerAssignedEvent(
+ event.getAttemptID(), container));
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/metrics/MRAppMetrics.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/metrics/MRAppMetrics.java
new file mode 100644
index 0000000..d511e33
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/metrics/MRAppMetrics.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.metrics;
+
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+@Metrics(about="MR App Metrics", context="mapred")
+public class MRAppMetrics {
+ @Metric MutableCounterInt jobsSubmitted;
+ @Metric MutableCounterInt jobsCompleted;
+ @Metric MutableCounterInt jobsFailed;
+ @Metric MutableCounterInt jobsKilled;
+ @Metric MutableGaugeInt jobsPreparing;
+ @Metric MutableGaugeInt jobsRunning;
+
+ @Metric MutableCounterInt mapsLaunched;
+ @Metric MutableCounterInt mapsCompleted;
+ @Metric MutableCounterInt mapsFailed;
+ @Metric MutableCounterInt mapsKilled;
+ @Metric MutableGaugeInt mapsRunning;
+ @Metric MutableGaugeInt mapsWaiting;
+
+ @Metric MutableCounterInt reducesLaunched;
+ @Metric MutableCounterInt reducesCompleted;
+ @Metric MutableCounterInt reducesFailed;
+ @Metric MutableCounterInt reducesKilled;
+ @Metric MutableGaugeInt reducesRunning;
+ @Metric MutableGaugeInt reducesWaiting;
+
+ public static MRAppMetrics create() {
+ return create(DefaultMetricsSystem.instance());
+ }
+
+ public static MRAppMetrics create(MetricsSystem ms) {
+ JvmMetrics.create("MRAppMaster", null, ms);
+ return ms.register(new MRAppMetrics());
+ }
+
+ // potential instrumentation interface methods
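+ // (typical use, as a sketch: the app master creates one instance via
+ // MRAppMetrics.create() at startup and calls launchedTask()/completedTask()
+ // etc. as jobs and tasks move through their lifecycle)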
+
+ public void submittedJob(Job job) {
+ jobsSubmitted.incr();
+ }
+
+ public void completedJob(Job job) {
+ jobsCompleted.incr();
+ }
+
+ public void failedJob(Job job) {
+ jobsFailed.incr();
+ }
+
+ public void killedJob(Job job) {
+ jobsKilled.incr();
+ }
+
+ public void preparingJob(Job job) {
+ jobsPreparing.incr();
+ }
+
+ public void endPreparingJob(Job job) {
+ jobsPreparing.decr();
+ }
+
+ public void runningJob(Job job) {
+ jobsRunning.incr();
+ }
+
+ public void endRunningJob(Job job) {
+ jobsRunning.decr();
+ }
+
+ public void launchedTask(Task task) {
+ switch (task.getType()) {
+ case MAP:
+ mapsLaunched.incr();
+ break;
+ case REDUCE:
+ reducesLaunched.incr();
+ break;
+ }
+ endWaitingTask(task);
+ }
+
+ public void completedTask(Task task) {
+ switch (task.getType()) {
+ case MAP:
+ mapsCompleted.incr();
+ break;
+ case REDUCE:
+ reducesCompleted.incr();
+ break;
+ }
+ }
+
+ public void failedTask(Task task) {
+ switch (task.getType()) {
+ case MAP:
+ mapsFailed.incr();
+ break;
+ case REDUCE:
+ reducesFailed.incr();
+ break;
+ }
+ }
+
+ public void killedTask(Task task) {
+ switch (task.getType()) {
+ case MAP:
+ mapsKilled.incr();
+ break;
+ case REDUCE:
+ reducesKilled.incr();
+ break;
+ }
+ }
+
+ public void runningTask(Task task) {
+ switch (task.getType()) {
+ case MAP:
+ mapsRunning.incr();
+ break;
+ case REDUCE:
+ reducesRunning.incr();
+ break;
+ }
+ }
+
+ public void endRunningTask(Task task) {
+ switch (task.getType()) {
+ case MAP:
+ mapsRunning.decr();
+ break;
+ case REDUCE:
+ reducesRunning.decr();
+ break;
+ }
+ }
+
+ public void waitingTask(Task task) {
+ switch (task.getType()) {
+ case MAP:
+ mapsWaiting.incr();
+ break;
+ case REDUCE:
+ reducesWaiting.incr();
+ break;
+ }
+ }
+
+ public void endWaitingTask(Task task) {
+ switch (task.getType()) {
+ case MAP:
+ mapsWaiting.decr();
+ break;
+ case REDUCE:
+ reducesWaiting.decr();
+ break;
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/ControlledClock.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/ControlledClock.java
new file mode 100644
index 0000000..06ab6f7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/ControlledClock.java
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.mapreduce.v2.app.recover;
+
+import org.apache.hadoop.yarn.Clock;
+
+class ControlledClock implements Clock {
+ private long time = -1;
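+ // -1 is a sentinel meaning "not controlled": getTime() then falls
+ // through to the wrapped clock; setTime() freezes it for history replay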
+ private final Clock actualClock;
+ ControlledClock(Clock actualClock) {
+ this.actualClock = actualClock;
+ }
+ synchronized void setTime(long time) {
+ this.time = time;
+ }
+ synchronized void reset() {
+ time = -1;
+ }
+
+ @Override
+ public synchronized long getTime() {
+ if (time != -1) {
+ return time;
+ }
+ return actualClock.getTime();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/Recovery.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/Recovery.java
new file mode 100644
index 0000000..8005714
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/Recovery.java
@@ -0,0 +1,34 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.recover;
+
+import java.util.Set;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.event.Dispatcher;
+
+public interface Recovery {
+
+ Dispatcher getDispatcher();
+
+ Clock getClock();
+
+ Set<TaskId> getCompletedTasks();
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
new file mode 100644
index 0000000..c1e19b9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
@@ -0,0 +1,368 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.recover;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleaner;
+import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.service.CompositeService;
+import org.apache.hadoop.yarn.service.Service;
+
+/*
+ * Recovers the completed tasks from the previous life of the Application
+ * Master. The completed tasks are deciphered from the history file of the
+ * previous life. The recovery service intercepts and replays the events for
+ * completed tasks. While recovery is in progress, the scheduling of new
+ * tasks is delayed by buffering the task schedule events.
+ * The recovery service controls the clock while recovery is in progress.
+ */
+
+//TODO:
+//task cleanup for all non completed tasks
+//change job output committer to have
+// - atomic job output promotion
+// - recover output of completed tasks
+
+public class RecoveryService extends CompositeService implements Recovery {
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ private static final Log LOG = LogFactory.getLog(RecoveryService.class);
+
+ private final ApplicationId appID;
+ private final Dispatcher dispatcher;
+ private final ControlledClock clock;
+ private final int startCount;
+
+ private JobInfo jobInfo = null;
+ private final Map<TaskId, TaskInfo> completedTasks =
+ new HashMap<TaskId, TaskInfo>();
+
+ private final List<TaskEvent> pendingTaskScheduleEvents =
+ new ArrayList<TaskEvent>();
+
+ private volatile boolean recoveryMode = false;
+
+ public RecoveryService(ApplicationId appID, Clock clock, int startCount) {
+ super("RecoveringDispatcher");
+ this.appID = appID;
+ this.startCount = startCount;
+ this.dispatcher = new RecoveryDispatcher();
+ this.clock = new ControlledClock(clock);
+ addService((Service) dispatcher);
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ super.init(conf);
+ // parse the history file
+ try {
+ parse();
+ if (completedTasks.size() > 0) {
+ recoveryMode = true;
+ LOG.info("SETTING THE RECOVERY MODE TO TRUE. NO OF COMPLETED TASKS " +
+ "TO RECOVER " + completedTasks.size());
+ LOG.info("Job launch time " + jobInfo.getLaunchTime());
+ clock.setTime(jobInfo.getLaunchTime());
+ }
+ } catch (IOException e) {
+ LOG.warn(e);
+ LOG.warn("Could not parse the old history file. Aborting recovery. "
+ + "Starting afresh.");
+ }
+ }
+
+ @Override
+ public Dispatcher getDispatcher() {
+ return dispatcher;
+ }
+
+ @Override
+ public Clock getClock() {
+ return clock;
+ }
+
+ @Override
+ public Set<TaskId> getCompletedTasks() {
+ return completedTasks.keySet();
+ }
+
+ private void parse() throws IOException {
+ // TODO: parse history file based on startCount
+ String jobName = TypeConverter.fromYarn(appID).toString();
+ String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(getConfig());
+ FSDataInputStream in = null;
+ Path historyFile = null;
+ Path histDirPath = FileContext.getFileContext(getConfig()).makeQualified(
+ new Path(jobhistoryDir));
+ FileContext fc = FileContext.getFileContext(histDirPath.toUri(),
+ getConfig());
+ historyFile = fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(
+ histDirPath, jobName, startCount - 1)); //read the previous history file
+ in = fc.open(historyFile);
+ JobHistoryParser parser = new JobHistoryParser(in);
+ jobInfo = parser.parse();
+ Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
+ .getAllTasks();
+ for (TaskInfo taskInfo : taskInfos.values()) {
+ if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
+ completedTasks
+ .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
+ LOG.info("Read from history task "
+ + TypeConverter.toYarn(taskInfo.getTaskId()));
+ }
+ }
+ LOG.info("Read completed tasks from history "
+ + completedTasks.size());
+ }
+
+ class RecoveryDispatcher extends AsyncDispatcher {
+ private final EventHandler actualHandler;
+ private final EventHandler handler;
+
+ RecoveryDispatcher() {
+ actualHandler = super.getEventHandler();
+ handler = new InterceptingEventHandler(actualHandler);
+ }
+
+ @Override
+ public void dispatch(Event event) {
+ if (recoveryMode) {
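+ // while replaying history, move the controlled clock to the recorded
+ // timestamp of each attempt event so that recomputed start/finish
+ // times match the previous run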
+ if (event.getType() == TaskAttemptEventType.TA_CONTAINER_LAUNCHED) {
+ TaskAttemptInfo attInfo = getTaskAttemptInfo(((TaskAttemptEvent) event)
+ .getTaskAttemptID());
+ LOG.info("Attempt start time " + attInfo.getStartTime());
+ clock.setTime(attInfo.getStartTime());
+
+ } else if (event.getType() == TaskAttemptEventType.TA_DONE
+ || event.getType() == TaskAttemptEventType.TA_FAILMSG
+ || event.getType() == TaskAttemptEventType.TA_KILL) {
+ TaskAttemptInfo attInfo = getTaskAttemptInfo(((TaskAttemptEvent) event)
+ .getTaskAttemptID());
+ LOG.info("Attempt finish time " + attInfo.getFinishTime());
+ clock.setTime(attInfo.getFinishTime());
+ }
+
+ else if (event.getType() == TaskEventType.T_ATTEMPT_FAILED
+ || event.getType() == TaskEventType.T_ATTEMPT_KILLED
+ || event.getType() == TaskEventType.T_ATTEMPT_SUCCEEDED) {
+ TaskTAttemptEvent tEvent = (TaskTAttemptEvent) event;
+ LOG.info("Recovered Task attempt " + tEvent.getTaskAttemptID());
+ TaskInfo taskInfo = completedTasks.get(tEvent.getTaskAttemptID()
+ .getTaskId());
+ taskInfo.getAllTaskAttempts().remove(
+ TypeConverter.fromYarn(tEvent.getTaskAttemptID()));
+ // remove the task info from completed tasks if all attempts are
+ // recovered
+ if (taskInfo.getAllTaskAttempts().size() == 0) {
+ completedTasks.remove(tEvent.getTaskAttemptID().getTaskId());
+ // checkForRecoveryComplete
+          LOG.info("Completed tasks still to recover: " + completedTasks.size());
+ if (completedTasks.size() == 0) {
+ recoveryMode = false;
+ clock.reset();
+ LOG.info("Setting the recovery mode to false. " +
+ "Recovery is complete!");
+
+ // send all pending tasks schedule events
+ for (TaskEvent tEv : pendingTaskScheduleEvents) {
+ actualHandler.handle(tEv);
+ }
+
+ }
+ }
+ }
+ }
+ super.dispatch(event);
+ }
+
+ @Override
+ public EventHandler getEventHandler() {
+ return handler;
+ }
+ }
+
+ private TaskAttemptInfo getTaskAttemptInfo(TaskAttemptId id) {
+ TaskInfo taskInfo = completedTasks.get(id.getTaskId());
+ return taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn(id));
+ }
+
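+  // While in recovery mode, intercepts events that would normally reach the
+  // real components (container allocator, launcher, task cleaner) and
+  // synthesizes their outcomes from the parsed history instead.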
+ private class InterceptingEventHandler implements EventHandler {
+ EventHandler actualHandler;
+
+ InterceptingEventHandler(EventHandler actualHandler) {
+ this.actualHandler = actualHandler;
+ }
+
+ @Override
+ public void handle(Event event) {
+ if (!recoveryMode) {
+ // delegate to the dispatcher one
+ actualHandler.handle(event);
+ return;
+ }
+
+ else if (event.getType() == TaskEventType.T_SCHEDULE) {
+ TaskEvent taskEvent = (TaskEvent) event;
+ // delay the scheduling of new tasks till previous ones are recovered
+ if (completedTasks.get(taskEvent.getTaskID()) == null) {
+ LOG.debug("Adding to pending task events "
+ + taskEvent.getTaskID());
+ pendingTaskScheduleEvents.add(taskEvent);
+ return;
+ }
+ }
+
+ else if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
+ TaskAttemptId aId = ((ContainerAllocatorEvent) event).getAttemptID();
+ TaskAttemptInfo attInfo = getTaskAttemptInfo(aId);
+ LOG.debug("CONTAINER_REQ " + aId);
+ sendAssignedEvent(aId, attInfo);
+ return;
+ }
+
+ else if (event.getType() == TaskCleaner.EventType.TASK_CLEAN) {
+ TaskAttemptId aId = ((TaskCleanupEvent) event).getAttemptID();
+ LOG.debug("TASK_CLEAN");
+ actualHandler.handle(new TaskAttemptEvent(aId,
+ TaskAttemptEventType.TA_CLEANUP_DONE));
+ return;
+ }
+
+ else if (event.getType() == ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH) {
+ TaskAttemptId aId = ((ContainerRemoteLaunchEvent) event)
+ .getTaskAttemptID();
+ TaskAttemptInfo attInfo = getTaskAttemptInfo(aId);
+ actualHandler.handle(new TaskAttemptEvent(aId,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED));
+ // send the status update event
+ sendStatusUpdateEvent(aId, attInfo);
+
+ TaskAttemptState state = TaskAttemptState.valueOf(attInfo.getTaskStatus());
+ switch (state) {
+ case SUCCEEDED:
+ // send the done event
+ LOG.info("Sending done event to " + aId);
+ actualHandler.handle(new TaskAttemptEvent(aId,
+ TaskAttemptEventType.TA_DONE));
+ break;
+ case KILLED:
+ LOG.info("Sending kill event to " + aId);
+ actualHandler.handle(new TaskAttemptEvent(aId,
+ TaskAttemptEventType.TA_KILL));
+ break;
+ default:
+ LOG.info("Sending fail event to " + aId);
+ actualHandler.handle(new TaskAttemptEvent(aId,
+ TaskAttemptEventType.TA_FAILMSG));
+ break;
+ }
+ return;
+ }
+
+ // delegate to the actual handler
+ actualHandler.handle(event);
+ }
+
+ private void sendStatusUpdateEvent(TaskAttemptId yarnAttemptID,
+ TaskAttemptInfo attemptInfo) {
+ LOG.info("Sending status update event to " + yarnAttemptID);
+ TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
+ taskAttemptStatus.id = yarnAttemptID;
+ taskAttemptStatus.progress = 1.0f;
+ taskAttemptStatus.diagnosticInfo = "";
+ taskAttemptStatus.stateString = attemptInfo.getTaskStatus();
+ // taskAttemptStatus.outputSize = attemptInfo.getOutputSize();
+ taskAttemptStatus.phase = Phase.CLEANUP;
+ org.apache.hadoop.mapreduce.Counters cntrs = attemptInfo.getCounters();
+ if (cntrs == null) {
+ taskAttemptStatus.counters = null;
+ } else {
+ taskAttemptStatus.counters = TypeConverter.toYarn(attemptInfo
+ .getCounters());
+ }
+ actualHandler.handle(new TaskAttemptStatusUpdateEvent(
+ taskAttemptStatus.id, taskAttemptStatus));
+ }
+
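+    // The container here is a placeholder built from history: no real
+    // container exists during recovery, so the token is null and only the
+    // node http address is populated.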
+ private void sendAssignedEvent(TaskAttemptId yarnAttemptID,
+ TaskAttemptInfo attemptInfo) {
+ LOG.info("Sending assigned event to " + yarnAttemptID);
+ ContainerId cId = recordFactory
+ .newRecordInstance(ContainerId.class);
+ Container container = recordFactory
+ .newRecordInstance(Container.class);
+ container.setId(cId);
+ container.setNodeId(recordFactory
+ .newRecordInstance(NodeId.class));
+ container.setContainerToken(null);
+ container.setNodeHttpAddress(attemptInfo.getHostname() + ":" +
+ attemptInfo.getHttpPort());
+ actualHandler.handle(new TaskAttemptContainerAssignedEvent(yarnAttemptID,
+ container));
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocator.java
new file mode 100644
index 0000000..631e0f7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocator.java
@@ -0,0 +1,32 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import org.apache.hadoop.yarn.event.EventHandler;
+
+public interface ContainerAllocator extends EventHandler<ContainerAllocatorEvent> {
+
+  enum EventType {
+    CONTAINER_REQ,
+    CONTAINER_DEALLOCATE,
+    CONTAINER_FAILED
+  }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocatorEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocatorEvent.java
new file mode 100644
index 0000000..9b6e837
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocatorEvent.java
@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+public class ContainerAllocatorEvent extends
+ AbstractEvent<ContainerAllocator.EventType> {
+
+  private final TaskAttemptId attemptID;
+
+ public ContainerAllocatorEvent(TaskAttemptId attemptID,
+ ContainerAllocator.EventType type) {
+ super(type);
+ this.attemptID = attemptID;
+ }
+
+ public TaskAttemptId getAttemptID() {
+ return attemptID;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerFailedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerFailedEvent.java
new file mode 100644
index 0000000..726b654
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerFailedEvent.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+public class ContainerFailedEvent extends ContainerAllocatorEvent {
+
+ private final String contMgrAddress;
+
+ public ContainerFailedEvent(TaskAttemptId attemptID, String contMgrAddr) {
+ super(attemptID, ContainerAllocator.EventType.CONTAINER_FAILED);
+ this.contMgrAddress = contMgrAddr;
+ }
+
+ public String getContMgrAddress() {
+ return contMgrAddress;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestEvent.java
new file mode 100644
index 0000000..215a2b3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestEvent.java
@@ -0,0 +1,68 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+
+public class ContainerRequestEvent extends ContainerAllocatorEvent {
+
+ private final Resource capability;
+ private final String[] hosts;
+ private final String[] racks;
+ private boolean earlierAttemptFailed = false;
+
+ public ContainerRequestEvent(TaskAttemptId attemptID,
+ Resource capability,
+ String[] hosts, String[] racks) {
+ super(attemptID, ContainerAllocator.EventType.CONTAINER_REQ);
+ this.capability = capability;
+ this.hosts = hosts;
+ this.racks = racks;
+ }
+
+ ContainerRequestEvent(TaskAttemptId attemptID, Resource capability) {
+ this(attemptID, capability, new String[0], new String[0]);
+ this.earlierAttemptFailed = true;
+ }
+
+ public static ContainerRequestEvent createContainerRequestEventForFailedContainer(
+ TaskAttemptId attemptID,
+ Resource capability) {
+ //ContainerRequest for failed events does not consider rack / node locality?
+ return new ContainerRequestEvent(attemptID, capability);
+ }
+
+ public Resource getCapability() {
+ return capability;
+ }
+
+ public String[] getHosts() {
+ return hosts;
+ }
+
+ public String[] getRacks() {
+ return racks;
+ }
+
+ public boolean getEarlierAttemptFailed() {
+ return earlierAttemptFailed;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
new file mode 100644
index 0000000..dfebd27
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -0,0 +1,280 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import java.io.IOException;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.SchedulerSecurityInfo;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+/**
+ * Registers/unregisters to RM and sends heartbeats to RM.
+ */
+public class RMCommunicator extends AbstractService {
+  private static final Log LOG = LogFactory.getLog(RMCommunicator.class);
+ private int rmPollInterval;//millis
+ protected ApplicationId applicationId;
+ protected ApplicationAttemptId applicationAttemptId;
+ private volatile boolean stopped;
+ protected Thread allocatorThread;
+ protected EventHandler eventHandler;
+ protected AMRMProtocol scheduler;
+ private final ClientService clientService;
+ private int lastResponseID;
+ private Resource minContainerCapability;
+ private Resource maxContainerCapability;
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ private final AppContext context;
+ private Job job;
+
+ public RMCommunicator(ClientService clientService, AppContext context) {
+ super("RMCommunicator");
+ this.clientService = clientService;
+ this.context = context;
+ this.eventHandler = context.getEventHandler();
+ this.applicationId = context.getApplicationID();
+ this.applicationAttemptId = context.getApplicationAttemptId();
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ super.init(conf);
+ rmPollInterval =
+ conf.getInt(AMConstants.AM_RM_SCHEDULE_INTERVAL,
+ AMConstants.DEFAULT_AM_RM_SCHEDULE_INTERVAL);
+ }
+
+ @Override
+ public void start() {
+    scheduler = createSchedulerProxy();
+ //LOG.info("Scheduler is " + scheduler);
+ register();
+ startAllocatorThread();
+ JobID id = TypeConverter.fromYarn(context.getApplicationID());
+ JobId jobId = TypeConverter.toYarn(id);
+ job = context.getJob(jobId);
+ super.start();
+ }
+
+ protected AppContext getContext() {
+ return context;
+ }
+
+ protected Job getJob() {
+ return job;
+ }
+
+ protected void register() {
+ //Register
+ String host =
+ clientService.getBindAddress().getAddress().getHostAddress();
+ try {
+ RegisterApplicationMasterRequest request =
+ recordFactory.newRecordInstance(RegisterApplicationMasterRequest.class);
+ request.setApplicationAttemptId(applicationAttemptId);
+ request.setHost(host);
+ request.setRpcPort(clientService.getBindAddress().getPort());
+ request.setTrackingUrl(host + ":" + clientService.getHttpPort());
+ RegisterApplicationMasterResponse response =
+ scheduler.registerApplicationMaster(request);
+ minContainerCapability = response.getMinimumResourceCapability();
+ maxContainerCapability = response.getMaximumResourceCapability();
+ LOG.info("minContainerCapability: " + minContainerCapability.getMemory());
+ LOG.info("maxContainerCapability: " + maxContainerCapability.getMemory());
+ } catch (Exception are) {
+ LOG.info("Exception while registering", are);
+ throw new YarnException(are);
+ }
+ }
+
+ protected void unregister() {
+ try {
+ String finalState = "RUNNING";
+ if (job.getState() == JobState.SUCCEEDED) {
+ finalState = "SUCCEEDED";
+ } else if (job.getState() == JobState.KILLED) {
+ finalState = "KILLED";
+ } else if (job.getState() == JobState.FAILED
+ || job.getState() == JobState.ERROR) {
+ finalState = "FAILED";
+ }
+      StringBuilder sb = new StringBuilder();
+ for (String s : job.getDiagnostics()) {
+ sb.append(s).append("\n");
+ }
+ LOG.info("Setting job diagnostics to " + sb.toString());
+
+ String historyUrl = JobHistoryUtils.getHistoryUrl(getConfig(),
+ context.getApplicationID());
+ LOG.info("History url is " + historyUrl);
+
+ FinishApplicationMasterRequest request =
+ recordFactory.newRecordInstance(FinishApplicationMasterRequest.class);
+ request.setAppAttemptId(this.applicationAttemptId);
+      request.setFinalState(finalState);
+ request.setDiagnostics(sb.toString());
+ request.setTrackingUrl(historyUrl);
+ scheduler.finishApplicationMaster(request);
+ } catch(Exception are) {
+ LOG.info("Exception while unregistering ", are);
+ }
+ }
+
+ protected Resource getMinContainerCapability() {
+ return minContainerCapability;
+ }
+
+ protected Resource getMaxContainerCapability() {
+ return maxContainerCapability;
+ }
+
+ @Override
+ public void stop() {
+ stopped = true;
+ allocatorThread.interrupt();
+ try {
+ allocatorThread.join();
+ } catch (InterruptedException ie) {
+ LOG.info("InterruptedException while stopping", ie);
+ }
+ unregister();
+ super.stop();
+ }
+
+ protected void startAllocatorThread() {
+ allocatorThread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!stopped && !Thread.currentThread().isInterrupted()) {
+ try {
+ Thread.sleep(rmPollInterval);
+ try {
+ heartbeat();
+ } catch (Exception e) {
+              LOG.error("Error communicating with RM. ", e);
+ // TODO: for other exceptions
+ }
+ } catch (InterruptedException e) {
+            LOG.info("Allocator thread interrupted. Returning.");
+ return;
+ }
+ }
+ }
+ });
+ allocatorThread.start();
+ }
+
+ protected AMRMProtocol createSchedulerProxy() {
+ final YarnRPC rpc = YarnRPC.create(getConfig());
+ final Configuration conf = new Configuration(getConfig());
+ final String serviceAddr = conf.get(
+ YarnConfiguration.SCHEDULER_ADDRESS,
+ YarnConfiguration.DEFAULT_SCHEDULER_BIND_ADDRESS);
+
+ UserGroupInformation currentUser;
+ try {
+ currentUser = UserGroupInformation.getCurrentUser();
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+
+ if (UserGroupInformation.isSecurityEnabled()) {
+ conf.setClass(YarnConfiguration.YARN_SECURITY_INFO,
+ SchedulerSecurityInfo.class, SecurityInfo.class);
+
+ String tokenURLEncodedStr = System.getenv().get(
+ ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
+ LOG.debug("AppMasterToken is " + tokenURLEncodedStr);
+ Token<? extends TokenIdentifier> token = new Token<TokenIdentifier>();
+
+ try {
+ token.decodeFromUrlString(tokenURLEncodedStr);
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+
+ currentUser.addToken(token);
+ }
+
+ return currentUser.doAs(new PrivilegedAction<AMRMProtocol>() {
+ @Override
+ public AMRMProtocol run() {
+ return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class,
+ NetUtils.createSocketAddr(serviceAddr), conf);
+ }
+ });
+ }
+
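+  // Base heartbeat: an empty allocate call that keeps the AM's liveness known
+  // to the RM; subclasses replace this to send real asks and releases.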
+ protected synchronized void heartbeat() throws Exception {
+ AllocateRequest allocateRequest =
+ recordFactory.newRecordInstance(AllocateRequest.class);
+ allocateRequest.setApplicationAttemptId(applicationAttemptId);
+ allocateRequest.setResponseId(lastResponseID);
+ allocateRequest.addAllAsks(new ArrayList<ResourceRequest>());
+ allocateRequest.addAllReleases(new ArrayList<ContainerId>());
+ AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
+ AMResponse response = allocateResponse.getAMResponse();
+ if (response.getReboot()) {
+ LOG.info("Event from RM: shutting down Application Master");
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
new file mode 100644
index 0000000..fad43bd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -0,0 +1,784 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.RackResolver;
+
+/**
+ * Allocates the container from the ResourceManager scheduler.
+ */
+public class RMContainerAllocator extends RMContainerRequestor
+ implements ContainerAllocator {
+
+ private static final Log LOG = LogFactory.getLog(RMContainerAllocator.class);
+
+ public static final
+ float DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART = 0.05f;
+
+ private static final Priority PRIORITY_FAST_FAIL_MAP;
+ private static final Priority PRIORITY_REDUCE;
+ private static final Priority PRIORITY_MAP;
+
+ static {
+ PRIORITY_FAST_FAIL_MAP = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
+ PRIORITY_FAST_FAIL_MAP.setPriority(5);
+ PRIORITY_REDUCE = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
+ PRIORITY_REDUCE.setPriority(10);
+ PRIORITY_MAP = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
+ PRIORITY_MAP.setPriority(20);
+ }
+
+ /*
+  Vocabulary used:
+  pending -> requests which are NOT yet sent to RM
+  scheduled -> requests which are sent to RM but not yet assigned
+  assigned -> requests which are assigned to a container
+  completed -> requests whose containers have completed
+
+ Lifecycle of map
+ scheduled->assigned->completed
+
+ Lifecycle of reduce
+ pending->scheduled->assigned->completed
+
+  Maps are scheduled as soon as their requests are received. Reduces are
+  added to pending and are ramped up (moved to scheduled) based on the
+  number of completed maps and current availability in the cluster.
+ */
+
+ //reduces which are not yet scheduled
+ private final LinkedList<ContainerRequest> pendingReduces =
+ new LinkedList<ContainerRequest>();
+
+ //holds information about the assigned containers to task attempts
+ private final AssignedRequests assignedRequests = new AssignedRequests();
+
+ //holds scheduled requests to be fulfilled by RM
+ private final ScheduledRequests scheduledRequests = new ScheduledRequests();
+
+ private int containersAllocated = 0;
+ private int containersReleased = 0;
+ private int hostLocalAssigned = 0;
+ private int rackLocalAssigned = 0;
+
+ private boolean recalculateReduceSchedule = false;
+ private int mapResourceReqt;//memory
+ private int reduceResourceReqt;//memory
+ private int completedMaps = 0;
+ private int completedReduces = 0;
+
+ private boolean reduceStarted = false;
+ private float maxReduceRampupLimit = 0;
+ private float maxReducePreemptionLimit = 0;
+ private float reduceSlowStart = 0;
+
+ public RMContainerAllocator(ClientService clientService, AppContext context) {
+ super(clientService, context);
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ super.init(conf);
+ reduceSlowStart = conf.getFloat(
+ MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,
+ DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART);
+ maxReduceRampupLimit = conf.getFloat(
+ AMConstants.REDUCE_RAMPUP_UP_LIMIT,
+ AMConstants.DEFAULT_REDUCE_RAMP_UP_LIMIT);
+ maxReducePreemptionLimit = conf.getFloat(
+ AMConstants.REDUCE_PREEMPTION_LIMIT,
+ AMConstants.DEFAULT_REDUCE_PREEMPTION_LIMIT);
+ RackResolver.init(conf);
+ }
+
+ @Override
+ protected synchronized void heartbeat() throws Exception {
+ LOG.info("Before Scheduling: " + getStat());
+ List<Container> allocatedContainers = getResources();
+ LOG.info("After Scheduling: " + getStat());
+ if (allocatedContainers.size() > 0) {
+ LOG.info("Before Assign: " + getStat());
+ scheduledRequests.assign(allocatedContainers);
+ LOG.info("After Assign: " + getStat());
+ }
+
+ if (recalculateReduceSchedule) {
+ preemptReducesIfNeeded();
+ scheduleReduces();
+ recalculateReduceSchedule = false;
+ }
+ }
+
+ @Override
+ public void stop() {
+ super.stop();
+ LOG.info("Final Stats: " + getStat());
+ }
+
+ @Override
+ public synchronized void handle(ContainerAllocatorEvent event) {
+ LOG.info("Processing the event " + event.toString());
+ recalculateReduceSchedule = true;
+ if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
+ ContainerRequestEvent reqEvent = (ContainerRequestEvent) event;
+ if (reqEvent.getAttemptID().getTaskId().getTaskType().equals(TaskType.MAP)) {
+ if (mapResourceReqt == 0) {
+ mapResourceReqt = reqEvent.getCapability().getMemory();
+ int minSlotMemSize = getMinContainerCapability().getMemory();
+ mapResourceReqt = (int) Math.ceil((float) mapResourceReqt/minSlotMemSize) * minSlotMemSize;
+ LOG.info("mapResourceReqt:"+mapResourceReqt);
+ if (mapResourceReqt > getMaxContainerCapability().getMemory()) {
+ String diagMsg = "MAP capability required is more than the supported " +
+ "max container capability in the cluster. Killing the Job. mapResourceReqt: " +
+ mapResourceReqt + " maxContainerCapability:" + getMaxContainerCapability().getMemory();
+ LOG.info(diagMsg);
+ eventHandler.handle(new JobDiagnosticsUpdateEvent(
+ getJob().getID(), diagMsg));
+ eventHandler.handle(new JobEvent(getJob().getID(), JobEventType.JOB_KILL));
+ }
+ }
+ //set the rounded off memory
+ reqEvent.getCapability().setMemory(mapResourceReqt);
+ scheduledRequests.addMap(reqEvent);//maps are immediately scheduled
+ } else {
+ if (reduceResourceReqt == 0) {
+ reduceResourceReqt = reqEvent.getCapability().getMemory();
+ int minSlotMemSize = getMinContainerCapability().getMemory();
+ //round off on slotsize
+ reduceResourceReqt = (int) Math.ceil((float) reduceResourceReqt/minSlotMemSize) * minSlotMemSize;
+ LOG.info("reduceResourceReqt:"+reduceResourceReqt);
+ if (reduceResourceReqt > getMaxContainerCapability().getMemory()) {
+ String diagMsg = "REDUCE capability required is more than the supported " +
+ "max container capability in the cluster. Killing the Job. reduceResourceReqt: " +
+ reduceResourceReqt + " maxContainerCapability:" + getMaxContainerCapability().getMemory();
+ LOG.info(diagMsg);
+ eventHandler.handle(new JobDiagnosticsUpdateEvent(
+ getJob().getID(), diagMsg));
+ eventHandler.handle(new JobEvent(getJob().getID(), JobEventType.JOB_KILL));
+ }
+ }
+ //set the rounded off memory
+ reqEvent.getCapability().setMemory(reduceResourceReqt);
+ if (reqEvent.getEarlierAttemptFailed()) {
+ //add to the front of queue for fail fast
+ pendingReduces.addFirst(new ContainerRequest(reqEvent, PRIORITY_REDUCE));
+ } else {
+ pendingReduces.add(new ContainerRequest(reqEvent, PRIORITY_REDUCE));//reduces are added to pending and are slowly ramped up
+ }
+ }
+
+ } else if (
+ event.getType() == ContainerAllocator.EventType.CONTAINER_DEALLOCATE) {
+ TaskAttemptId aId = event.getAttemptID();
+
+ boolean removed = scheduledRequests.remove(aId);
+ if (!removed) {
+ ContainerId containerId = assignedRequests.get(aId);
+ if (containerId != null) {
+ removed = true;
+ assignedRequests.remove(aId);
+ containersReleased++;
+ release(containerId);
+ }
+ }
+ if (!removed) {
+ LOG.error("Could not deallocate container for task attemptId " +
+ aId);
+ }
+ } else if (
+ event.getType() == ContainerAllocator.EventType.CONTAINER_FAILED) {
+ ContainerFailedEvent fEv = (ContainerFailedEvent) event;
+ String host = getHost(fEv.getContMgrAddress());
+ containerFailedOnHost(host);
+ }
+ }
+
+ private static String getHost(String contMgrAddress) {
+ String host = contMgrAddress;
+ String[] hostport = host.split(":");
+ if (hostport.length == 2) {
+ host = hostport[0];
+ }
+ return host;
+ }
+
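+  // If scheduled maps are starving while reduces hold the cluster, first pull
+  // scheduled reduces back to pending, then preempt running reduces until at
+  // least one map can fit.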
+ private void preemptReducesIfNeeded() {
+ if (reduceResourceReqt == 0) {
+ return; //no reduces
+ }
+ //check if reduces have taken over the whole cluster and there are
+ //unassigned maps
+ if (scheduledRequests.maps.size() > 0) {
+ int memLimit = getMemLimit();
+ int availableMemForMap = memLimit - ((assignedRequests.reduces.size() -
+ assignedRequests.preemptionWaitingReduces.size()) * reduceResourceReqt);
+      //availableMemForMap must be sufficient to run at least 1 map
+ if (availableMemForMap < mapResourceReqt) {
+ //to make sure new containers are given to maps and not reduces
+ //ramp down all scheduled reduces if any
+ //(since reduces are scheduled at higher priority than maps)
+ LOG.info("Ramping down all scheduled reduces:" + scheduledRequests.reduces.size());
+ for (ContainerRequest req : scheduledRequests.reduces.values()) {
+ pendingReduces.add(req);
+ }
+ scheduledRequests.reduces.clear();
+
+        //preempt to make space for at least one map
+        int preemptionLimit = Math.max(mapResourceReqt,
+            (int) (maxReducePreemptionLimit * memLimit));
+
+        int preemptMem = Math.min(scheduledRequests.maps.size() * mapResourceReqt,
+            preemptionLimit);
+
+ int toPreempt = (int) Math.ceil((float) preemptMem/reduceResourceReqt);
+ toPreempt = Math.min(toPreempt, assignedRequests.reduces.size());
+
+ LOG.info("Going to preempt " + toPreempt);
+ assignedRequests.preemptReduce(toPreempt);
+ }
+ }
+ }
+
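+  // Moves reduces between pending and scheduled: holds them until the
+  // slow-start threshold of completed maps is met, then ramps the scheduled
+  // reduce memory toward completedMapPercent of the total limit (capped by
+  // maxReduceRampupLimit), ramping down again if maps need the space.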
+ private void scheduleReduces() {
+
+ if (pendingReduces.size() == 0) {
+ return;
+ }
+
+ LOG.info("Recalculating schedule...");
+
+ //if all maps are assigned, then ramp up all reduces irrespective of the
+ //headroom
+ if (scheduledRequests.maps.size() == 0 && pendingReduces.size() > 0) {
+ LOG.info("All maps assigned. Ramping up all remaining reduces:" + pendingReduces.size());
+ for (ContainerRequest req : pendingReduces) {
+ scheduledRequests.addReduce(req);
+ }
+ pendingReduces.clear();
+ return;
+ }
+
+
+ int totalMaps = assignedRequests.maps.size() + completedMaps + scheduledRequests.maps.size();
+
+ //check for slow start
+ if (!reduceStarted) {//not set yet
+ int completedMapsForReduceSlowstart = (int)Math.ceil(reduceSlowStart *
+ totalMaps);
+ if(completedMaps < completedMapsForReduceSlowstart) {
+ LOG.info("Reduce slow start threshold not met. " +
+ "completedMapsForReduceSlowstart " + completedMapsForReduceSlowstart);
+ return;
+ } else {
+ LOG.info("Reduce slow start threshold reached. Scheduling reduces.");
+ reduceStarted = true;
+ }
+ }
+
+ float completedMapPercent = 0f;
+ if (totalMaps != 0) {//support for 0 maps
+ completedMapPercent = (float)completedMaps/totalMaps;
+ } else {
+ completedMapPercent = 1;
+ }
+
+ int netScheduledMapMem = scheduledRequests.maps.size() * mapResourceReqt
+ + assignedRequests.maps.size() * mapResourceReqt;
+
+ int netScheduledReduceMem = scheduledRequests.reduces.size()
+ * reduceResourceReqt + assignedRequests.reduces.size()
+ * reduceResourceReqt;
+
+ int finalMapMemLimit = 0;
+ int finalReduceMemLimit = 0;
+
+ // ramp up the reduces based on completed map percentage
+ int totalMemLimit = getMemLimit();
+ int idealReduceMemLimit = Math.min((int)(completedMapPercent * totalMemLimit),
+ (int) (maxReduceRampupLimit * totalMemLimit));
+ int idealMapMemLimit = totalMemLimit - idealReduceMemLimit;
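+    // Example with hypothetical numbers: totalMemLimit=100, completedMapPercent=0.5
+    // and maxReduceRampupLimit=0.5 give idealReduceMemLimit=min(50,50)=50 and
+    // idealMapMemLimit=50, so reduces may hold at most half the memory until
+    // more maps complete.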
+
+ // check if there aren't enough maps scheduled, give the free map capacity
+ // to reduce
+ if (idealMapMemLimit > netScheduledMapMem) {
+ int unusedMapMemLimit = idealMapMemLimit - netScheduledMapMem;
+ finalReduceMemLimit = idealReduceMemLimit + unusedMapMemLimit;
+ finalMapMemLimit = totalMemLimit - finalReduceMemLimit;
+ } else {
+ finalMapMemLimit = idealMapMemLimit;
+ finalReduceMemLimit = idealReduceMemLimit;
+ }
+
+ LOG.info("completedMapPercent " + completedMapPercent +
+ " totalMemLimit:" + totalMemLimit +
+ " finalMapMemLimit:" + finalMapMemLimit +
+ " finalReduceMemLimit:" + finalReduceMemLimit +
+ " netScheduledMapMem:" + netScheduledMapMem +
+ " netScheduledReduceMem:" + netScheduledReduceMem);
+
+ int rampUp = (finalReduceMemLimit - netScheduledReduceMem)
+ / reduceResourceReqt;
+
+ if (rampUp > 0) {
+ rampUp = Math.min(rampUp, pendingReduces.size());
+ LOG.info("Ramping up " + rampUp);
+ //more reduce to be scheduled
+ for (int i = 0; i < rampUp; i++) {
+ ContainerRequest request = pendingReduces.removeFirst();
+ scheduledRequests.addReduce(request);
+ }
+ } else if (rampUp < 0){
+ int rampDown = -1 * rampUp;
+ rampDown = Math.min(rampDown, scheduledRequests.reduces.size());
+ LOG.info("Ramping down " + rampDown);
+ //remove from the scheduled and move back to pending
+ for (int i = 0; i < rampDown; i++) {
+ ContainerRequest request = scheduledRequests.removeReduce();
+ pendingReduces.add(request);
+ }
+ }
+ }
+
+ /**
+ * Synchronized to avoid findbugs warnings
+ */
+ private synchronized String getStat() {
+ return "PendingReduces:" + pendingReduces.size() +
+ " ScheduledMaps:" + scheduledRequests.maps.size() +
+ " ScheduledReduces:" + scheduledRequests.reduces.size() +
+ " AssignedMaps:" + assignedRequests.maps.size() +
+ " AssignedReduces:" + assignedRequests.reduces.size() +
+ " completedMaps:" + completedMaps +
+ " completedReduces:" + completedReduces +
+ " containersAllocated:" + containersAllocated +
+ " containersReleased:" + containersReleased +
+ " hostLocalAssigned:" + hostLocalAssigned +
+ " rackLocalAssigned:" + rackLocalAssigned +
+ " availableResources(headroom):" + getAvailableResources();
+ }
+
+ private List<Container> getResources() throws Exception {
+ int headRoom = getAvailableResources() != null ? getAvailableResources().getMemory() : 0;//first time it would be null
+ AMResponse response = makeRemoteRequest();
+ int newHeadRoom = getAvailableResources() != null ? getAvailableResources().getMemory() : 0;
+ List<Container> newContainers = response.getNewContainerList();
+ List<Container> finishedContainers = response.getFinishedContainerList();
+ if (newContainers.size() + finishedContainers.size() > 0 || headRoom != newHeadRoom) {
+ //something changed
+ recalculateReduceSchedule = true;
+ }
+
+ List<Container> allocatedContainers = new ArrayList<Container>();
+ for (Container cont : newContainers) {
+ allocatedContainers.add(cont);
+ LOG.debug("Received new Container :" + cont);
+ }
+ for (Container cont : finishedContainers) {
+ LOG.info("Received completed container " + cont);
+ TaskAttemptId attemptID = assignedRequests.get(cont.getId());
+ if (attemptID == null) {
+ LOG.error("Container complete event for unknown container id "
+ + cont.getId());
+ } else {
+ assignedRequests.remove(attemptID);
+ if (attemptID.getTaskId().getTaskType().equals(TaskType.MAP)) {
+ completedMaps++;
+ } else {
+ completedReduces++;
+ }
+ // send the container completed event to Task attempt
+ eventHandler.handle(new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
+ // Send the diagnostics
+ String diagnostics = cont.getContainerStatus().getDiagnostics();
+ eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID,
+ diagnostics));
+ }
+ }
+ return newContainers;
+ }
+
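+  // Memory the job could use right now: current headroom from the RM plus
+  // memory already held by assigned maps and reduces.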
+ private int getMemLimit() {
+ int headRoom = getAvailableResources() != null ? getAvailableResources().getMemory() : 0;
+ return headRoom + assignedRequests.maps.size() * mapResourceReqt +
+ assignedRequests.reduces.size() * reduceResourceReqt;
+ }
+
+ private class ScheduledRequests {
+
+ private final LinkedList<TaskAttemptId> earlierFailedMaps =
+ new LinkedList<TaskAttemptId>();
+
+ /** Maps from a host to a list of Map tasks with data on the host */
+ private final Map<String, LinkedList<TaskAttemptId>> mapsHostMapping =
+ new HashMap<String, LinkedList<TaskAttemptId>>();
+ private final Map<String, LinkedList<TaskAttemptId>> mapsRackMapping =
+ new HashMap<String, LinkedList<TaskAttemptId>>();
+ private final Map<TaskAttemptId, ContainerRequest> maps =
+ new LinkedHashMap<TaskAttemptId, ContainerRequest>();
+
+ private final LinkedHashMap<TaskAttemptId, ContainerRequest> reduces =
+ new LinkedHashMap<TaskAttemptId, ContainerRequest>();
+
+ boolean remove(TaskAttemptId tId) {
+ ContainerRequest req = null;
+ if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
+ req = maps.remove(tId);
+ } else {
+ req = reduces.remove(tId);
+ }
+
+ if (req == null) {
+ return false;
+ } else {
+ decContainerReq(req);
+ return true;
+ }
+ }
+
+ ContainerRequest removeReduce() {
+ Iterator<Entry<TaskAttemptId, ContainerRequest>> it = reduces.entrySet().iterator();
+ if (it.hasNext()) {
+ Entry<TaskAttemptId, ContainerRequest> entry = it.next();
+ it.remove();
+ decContainerReq(entry.getValue());
+ return entry.getValue();
+ }
+ return null;
+ }
+
+ void addMap(ContainerRequestEvent event) {
+ ContainerRequest request = null;
+
+ if (event.getEarlierAttemptFailed()) {
+ earlierFailedMaps.add(event.getAttemptID());
+ request = new ContainerRequest(event, PRIORITY_FAST_FAIL_MAP);
+ } else {
+ for (String host : event.getHosts()) {
+ //host comes from data splitLocations which are hostnames. Containers
+ // use IP addresses.
+ //TODO Temporary fix for locality. Use resolvers from h-common.
+ // Cache to make this more efficient ?
+ InetAddress addr = null;
+ try {
+ addr = InetAddress.getByName(host);
+ } catch (UnknownHostException e) {
+            LOG.warn("Unable to resolve host to IP for host [" + host + "]");
+ }
+          if (addr != null) //if resolution fails, keep the original hostname
+ host = addr.getHostAddress();
+ LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
+ if (list == null) {
+ list = new LinkedList<TaskAttemptId>();
+ mapsHostMapping.put(host, list);
+ }
+ list.add(event.getAttemptID());
+ LOG.info("Added attempt req to host " + host);
+ }
+ for (String rack: event.getRacks()) {
+ LinkedList<TaskAttemptId> list = mapsRackMapping.get(rack);
+ if (list == null) {
+ list = new LinkedList<TaskAttemptId>();
+ mapsRackMapping.put(rack, list);
+ }
+ list.add(event.getAttemptID());
+ LOG.info("Added attempt req to rack " + rack);
+ }
+ request = new ContainerRequest(event, PRIORITY_MAP);
+ }
+ maps.put(event.getAttemptID(), request);
+ addContainerReq(request);
+ }
+
+
+ void addReduce(ContainerRequest req) {
+ reduces.put(req.attemptID, req);
+ addContainerReq(req);
+ }
+
+ private void assign(List<Container> allocatedContainers) {
+ Iterator<Container> it = allocatedContainers.iterator();
+ LOG.info("Got allocated containers " + allocatedContainers.size());
+ containersAllocated += allocatedContainers.size();
+ while (it.hasNext()) {
+ Container allocated = it.next();
+ LOG.info("Assigning container " + allocated);
+ ContainerRequest assigned = assign(allocated);
+
+ if (assigned != null) {
+ // Update resource requests
+ decContainerReq(assigned);
+
+ // send the container-assigned event to task attempt
+ eventHandler.handle(new TaskAttemptContainerAssignedEvent(
+ assigned.attemptID, allocated));
+
+ assignedRequests.add(allocated.getId(), assigned.attemptID);
+
+ LOG.info("Assigned container (" + allocated + ") " +
+ " to task " + assigned.attemptID +
+ " on node " + allocated.getNodeId().toString());
+ } else {
+ //not assigned to any request, release the container
+          LOG.info("Releasing unassigned and invalid container " + allocated
+              + ": it matches no outstanding request.");
+ containersReleased++;
+ release(allocated.getId());
+ }
+ }
+ }
+
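+    // Matches one allocated container to a request: by exact size when map and
+    // reduce sizes differ, otherwise failed maps first, then reduces, then maps.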
+ private ContainerRequest assign(Container allocated) {
+ ContainerRequest assigned = null;
+
+ if (mapResourceReqt != reduceResourceReqt) {
+ //assign based on size
+ LOG.info("Assigning based on container size");
+ if (allocated.getResource().getMemory() == mapResourceReqt) {
+ assigned = assignToFailedMap(allocated);
+ if (assigned == null) {
+ assigned = assignToMap(allocated);
+ }
+ } else if (allocated.getResource().getMemory() == reduceResourceReqt) {
+ assigned = assignToReduce(allocated);
+ }
+
+ return assigned;
+ }
+
+ //container can be given to either map or reduce
+ //assign based on priority
+
+ //try to assign to earlierFailedMaps if present
+ assigned = assignToFailedMap(allocated);
+
+ //Assign to reduces before assigning to maps ?
+ if (assigned == null) {
+ assigned = assignToReduce(allocated);
+ }
+
+ //try to assign to maps if present
+ if (assigned == null) {
+ assigned = assignToMap(allocated);
+ }
+
+ return assigned;
+ }
+
+
+ private ContainerRequest assignToFailedMap(Container allocated) {
+ //try to assign to earlierFailedMaps if present
+ ContainerRequest assigned = null;
+ while (assigned == null && earlierFailedMaps.size() > 0 &&
+ allocated.getResource().getMemory() >= mapResourceReqt) {
+ TaskAttemptId tId = earlierFailedMaps.removeFirst();
+ if (maps.containsKey(tId)) {
+ assigned = maps.remove(tId);
+ JobCounterUpdateEvent jce =
+ new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
+ jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
+ eventHandler.handle(jce);
+ LOG.info("Assigned from earlierFailedMaps");
+ break;
+ }
+ }
+ return assigned;
+ }
+
+ private ContainerRequest assignToReduce(Container allocated) {
+ ContainerRequest assigned = null;
+ //try to assign to reduces if present
+ if (assigned == null && reduces.size() > 0
+ && allocated.getResource().getMemory() >= reduceResourceReqt) {
+ TaskAttemptId tId = reduces.keySet().iterator().next();
+ assigned = reduces.remove(tId);
+ LOG.info("Assigned to reduce");
+ }
+ return assigned;
+ }
+
+ private ContainerRequest assignToMap(Container allocated) {
+ //try to assign to maps if present
+ //first by host, then by rack, followed by *
+ ContainerRequest assigned = null;
+ while (assigned == null && maps.size() > 0
+ && allocated.getResource().getMemory() >= mapResourceReqt) {
+ String host = getHost(allocated.getNodeId().toString());
+ LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
+ while (list != null && list.size() > 0) {
+ LOG.info("Host matched to the request list " + host);
+ TaskAttemptId tId = list.removeFirst();
+ if (maps.containsKey(tId)) {
+ assigned = maps.remove(tId);
+ JobCounterUpdateEvent jce =
+ new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
+ jce.addCounterUpdate(JobCounter.DATA_LOCAL_MAPS, 1);
+ eventHandler.handle(jce);
+ hostLocalAssigned++;
+ LOG.info("Assigned based on host match " + host);
+ break;
+ }
+ }
+ if (assigned == null) {
+ String rack = RackResolver.resolve(host).getNetworkLocation();
+ list = mapsRackMapping.get(rack);
+ while (list != null && list.size() > 0) {
+ TaskAttemptId tId = list.removeFirst();
+ if (maps.containsKey(tId)) {
+ assigned = maps.remove(tId);
+ JobCounterUpdateEvent jce =
+ new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
+ jce.addCounterUpdate(JobCounter.RACK_LOCAL_MAPS, 1);
+ eventHandler.handle(jce);
+ rackLocalAssigned++;
+ LOG.info("Assigned based on rack match " + rack);
+ break;
+ }
+ }
+ if (assigned == null && maps.size() > 0) {
+ TaskAttemptId tId = maps.keySet().iterator().next();
+ assigned = maps.remove(tId);
+ JobCounterUpdateEvent jce =
+ new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
+ jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
+ eventHandler.handle(jce);
+ LOG.info("Assigned based on * match");
+ break;
+ }
+ }
+ }
+ return assigned;
+ }
+ }
+
+ private class AssignedRequests {
+ private final Map<ContainerId, TaskAttemptId> containerToAttemptMap =
+ new HashMap<ContainerId, TaskAttemptId>();
+ private final LinkedHashMap<TaskAttemptId, ContainerId> maps =
+ new LinkedHashMap<TaskAttemptId, ContainerId>();
+ private final LinkedHashMap<TaskAttemptId, ContainerId> reduces =
+ new LinkedHashMap<TaskAttemptId, ContainerId>();
+ private final Set<TaskAttemptId> preemptionWaitingReduces =
+ new HashSet<TaskAttemptId>();
+
+ void add(ContainerId containerId, TaskAttemptId tId) {
+ LOG.info("Assigned container " + containerId.toString()
+ + " to " + tId);
+ containerToAttemptMap.put(containerId, tId);
+ if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
+ maps.put(tId, containerId);
+ } else {
+ reduces.put(tId, containerId);
+ }
+ }
+
+ void preemptReduce(int toPreempt) {
+      List<TaskAttemptId> reduceList =
+          new ArrayList<TaskAttemptId>(reduces.keySet());
+ //sort reduces on progress
+ Collections.sort(reduceList,
+ new Comparator<TaskAttemptId>() {
+ @Override
+            public int compare(TaskAttemptId o1, TaskAttemptId o2) {
+              //ascending by progress, so the least-progressed reduce goes first;
+              //Float.compare keeps the comparator contract (compare(a, a) == 0)
+              float p1 = getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress();
+              float p2 = getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress();
+              return Float.compare(p1, p2);
+ }
+ });
+
+ for (int i = 0; i < toPreempt && reduceList.size() > 0; i++) {
+ TaskAttemptId id = reduceList.remove(0);//remove the one on top
+ LOG.info("Preempting " + id);
+ preemptionWaitingReduces.add(id);
+ eventHandler.handle(new TaskAttemptEvent(id, TaskAttemptEventType.TA_KILL));
+ }
+ }
+
+ boolean remove(TaskAttemptId tId) {
+ ContainerId containerId = null;
+ if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
+ containerId = maps.remove(tId);
+ } else {
+ containerId = reduces.remove(tId);
+ if (containerId != null) {
+ boolean preempted = preemptionWaitingReduces.remove(tId);
+ if (preempted) {
+ LOG.info("Reduce preemption successful " + tId);
+ }
+ }
+ }
+
+ if (containerId != null) {
+ containerToAttemptMap.remove(containerId);
+ return true;
+ }
+ return false;
+ }
+
+ TaskAttemptId get(ContainerId cId) {
+ return containerToAttemptMap.get(cId);
+ }
+
+ ContainerId get(TaskAttemptId tId) {
+ if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
+ return maps.get(tId);
+ } else {
+ return reduces.get(tId);
+ }
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
new file mode 100644
index 0000000..024bac1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
@@ -0,0 +1,274 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+/**
+ * Keeps the data structures to send container requests to RM.
+ */
+public abstract class RMContainerRequestor extends RMCommunicator {
+
+ private static final Log LOG = LogFactory.getLog(RMContainerRequestor.class);
+ static final String ANY = "*";
+
+ private int lastResponseID;
+ private Resource availableResources;
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+  //Key -> Priority
+  //Value -> Map
+  //  Key -> ResourceName (e.g., hostname, rackname, *)
+  //  Value -> Map
+  //    Key -> Resource Capability
+  //    Value -> ResourceRequest
+ private final Map<Priority, Map<String, Map<Resource, ResourceRequest>>>
+ remoteRequestsTable =
+ new TreeMap<Priority, Map<String, Map<Resource, ResourceRequest>>>();
+
+ private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>();
+ private final Set<ContainerId> release = new TreeSet<ContainerId>();
+
+ private boolean nodeBlacklistingEnabled;
+ private int maxTaskFailuresPerNode;
+ private final Map<String, Integer> nodeFailures = new HashMap<String, Integer>();
+ private final Set<String> blacklistedNodes = new HashSet<String>();
+
+ public RMContainerRequestor(ClientService clientService, AppContext context) {
+ super(clientService, context);
+ }
+
+ static class ContainerRequest {
+ final TaskAttemptId attemptID;
+ final Resource capability;
+ final String[] hosts;
+ final String[] racks;
+ //final boolean earlierAttemptFailed;
+ final Priority priority;
+ public ContainerRequest(ContainerRequestEvent event, Priority priority) {
+ this.attemptID = event.getAttemptID();
+ this.capability = event.getCapability();
+ this.hosts = event.getHosts();
+ this.racks = event.getRacks();
+ //this.earlierAttemptFailed = event.getEarlierAttemptFailed();
+ this.priority = priority;
+ }
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ super.init(conf);
+ nodeBlacklistingEnabled =
+ conf.getBoolean(AMConstants.NODE_BLACKLISTING_ENABLE, true);
+ LOG.info("nodeBlacklistingEnabled:" + nodeBlacklistingEnabled);
+ maxTaskFailuresPerNode =
+ conf.getInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 3);
+ LOG.info("maxTaskFailuresPerNode is " + maxTaskFailuresPerNode);
+ }
+
+ protected abstract void heartbeat() throws Exception;
+
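+  // Sends the accumulated ask and release sets to the RM in one allocate call,
+  // records the new headroom, and clears both sets for the next heartbeat.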
+ protected AMResponse makeRemoteRequest() throws YarnRemoteException {
+ AllocateRequest allocateRequest = recordFactory
+ .newRecordInstance(AllocateRequest.class);
+ allocateRequest.setApplicationAttemptId(applicationAttemptId);
+ allocateRequest.setResponseId(lastResponseID);
+ allocateRequest.addAllAsks(new ArrayList<ResourceRequest>(ask));
+ allocateRequest.addAllReleases(new ArrayList<ContainerId>(release));
+ AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
+ AMResponse response = allocateResponse.getAMResponse();
+ lastResponseID = response.getResponseId();
+ availableResources = response.getAvailableResources();
+
+    LOG.info("getResources() for " + applicationId + ": ask="
+        + ask.size() + " release=" + release.size() + " newContainers="
+ + response.getNewContainerCount() + " finishedContainers="
+ + response.getFinishedContainerCount()
+ + " resourcelimit=" + availableResources);
+
+ ask.clear();
+ release.clear();
+ return response;
+ }
+
+ protected void containerFailedOnHost(String hostName) {
+ if (!nodeBlacklistingEnabled) {
+ return;
+ }
+ if (blacklistedNodes.contains(hostName)) {
+ LOG.info("Host " + hostName + " is already blacklisted.");
+ return; //already blacklisted
+ }
+ Integer failures = nodeFailures.remove(hostName);
+ failures = failures == null ? 0 : failures;
+ failures++;
+ LOG.info(failures + " failures on node " + hostName);
+ if (failures >= maxTaskFailuresPerNode) {
+ blacklistedNodes.add(hostName);
+ LOG.info("Blacklisted host " + hostName);
+
+ //remove all the requests corresponding to this hostname
+ for (Map<String, Map<Resource, ResourceRequest>> remoteRequests
+ : remoteRequestsTable.values()){
+ //remove from host
+ Map<Resource, ResourceRequest> reqMap = remoteRequests.remove(hostName);
+ if (reqMap != null) {
+ for (ResourceRequest req : reqMap.values()) {
+ ask.remove(req);
+ }
+ }
+ //TODO: remove from rack
+ }
+ } else {
+ nodeFailures.put(hostName, failures);
+ }
+ }
+
+ protected Resource getAvailableResources() {
+ return availableResources;
+ }
+
+ protected void addContainerReq(ContainerRequest req) {
+ // Create resource requests
+ for (String host : req.hosts) {
+ // Data-local
+ addResourceRequest(req.priority, host, req.capability);
+ }
+
+ // Rack-local
+ for (String rack : req.racks) {
+ addResourceRequest(req.priority, rack, req.capability);
+ }
+
+ // Off-switch
+ addResourceRequest(req.priority, ANY, req.capability);
+ }
+
+ protected void decContainerReq(ContainerRequest req) {
+ // Update resource requests
+ for (String hostName : req.hosts) {
+ decResourceRequest(req.priority, hostName, req.capability);
+ }
+
+ for (String rack : req.racks) {
+ decResourceRequest(req.priority, rack, req.capability);
+ }
+
+ decResourceRequest(req.priority, ANY, req.capability);
+ }
+
+ private void addResourceRequest(Priority priority, String resourceName,
+ Resource capability) {
+ Map<String, Map<Resource, ResourceRequest>> remoteRequests =
+ this.remoteRequestsTable.get(priority);
+ if (remoteRequests == null) {
+ remoteRequests = new HashMap<String, Map<Resource, ResourceRequest>>();
+ this.remoteRequestsTable.put(priority, remoteRequests);
+ LOG.info("Added priority=" + priority);
+ }
+ Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
+ if (reqMap == null) {
+ reqMap = new HashMap<Resource, ResourceRequest>();
+ remoteRequests.put(resourceName, reqMap);
+ }
+ ResourceRequest remoteRequest = reqMap.get(capability);
+ if (remoteRequest == null) {
+ remoteRequest = recordFactory.newRecordInstance(ResourceRequest.class);
+ remoteRequest.setPriority(priority);
+ remoteRequest.setHostName(resourceName);
+ remoteRequest.setCapability(capability);
+ remoteRequest.setNumContainers(0);
+ reqMap.put(capability, remoteRequest);
+ }
+ remoteRequest.setNumContainers(remoteRequest.getNumContainers() + 1);
+
+ // Note this down for next interaction with ResourceManager
+ ask.add(remoteRequest);
+ LOG.info("addResourceRequest:" + " applicationId=" + applicationId.getId()
+ + " priority=" + priority.getPriority() + " resourceName=" + resourceName
+ + " numContainers=" + remoteRequest.getNumContainers() + " #asks="
+ + ask.size());
+ }
+
+ private void decResourceRequest(Priority priority, String resourceName,
+ Resource capability) {
+ Map<String, Map<Resource, ResourceRequest>> remoteRequests =
+ this.remoteRequestsTable.get(priority);
+ Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
+ ResourceRequest remoteRequest = reqMap.get(capability);
+
+ LOG.info("BEFORE decResourceRequest:" + " applicationId=" + applicationId.getId()
+ + " priority=" + priority.getPriority() + " resourceName=" + resourceName
+ + " numContainers=" + remoteRequest.getNumContainers() + " #asks="
+ + ask.size());
+
+ remoteRequest.setNumContainers(remoteRequest.getNumContainers() - 1);
+ if (remoteRequest.getNumContainers() == 0) {
+ reqMap.remove(capability);
+ if (reqMap.size() == 0) {
+ remoteRequests.remove(resourceName);
+ }
+ if (remoteRequests.size() == 0) {
+ remoteRequestsTable.remove(priority);
+ }
+ //remove from ask if it is still pending there
+ ask.remove(remoteRequest);
+ } else {
+ ask.add(remoteRequest);//re-add so the updated count goes out on the next
+ //heartbeat; a no-op if ask already contains it
+ }
+
+ LOG.info("AFTER decResourceRequest:" + " applicationId="
+ + applicationId.getId() + " priority=" + priority.getPriority()
+ + " resourceName=" + resourceName + " numContainers="
+ + remoteRequest.getNumContainers() + " #asks=" + ask.size());
+ }
+
+ protected void release(ContainerId containerId) {
+ release.add(containerId);
+ }
+
+}
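
For reference, a minimal standalone sketch of the ask-table bookkeeping above. The names AskTableSketch and Req are invented stand-ins for RMContainerRequestor and the YARN ResourceRequest record; the three map levels mirror priority -> resource name (hostname, rackname, or "*") -> capability. Repeated requests for the same triple only bump the container count, which is what lets one ResourceRequest carry many attempts' asks.

import java.util.HashMap;
import java.util.Map;

public class AskTableSketch {
  // Invented stand-in for the YARN ResourceRequest record.
  static class Req {
    int numContainers = 0;
    public String toString() { return "numContainers=" + numContainers; }
  }

  // priority -> resource name -> memory (the capability) -> request
  private final Map<Integer, Map<String, Map<Integer, Req>>> table =
      new HashMap<Integer, Map<String, Map<Integer, Req>>>();

  void add(int priority, String resourceName, int memory) {
    Map<String, Map<Integer, Req>> byName = table.get(priority);
    if (byName == null) {
      byName = new HashMap<String, Map<Integer, Req>>();
      table.put(priority, byName);
    }
    Map<Integer, Req> byCapability = byName.get(resourceName);
    if (byCapability == null) {
      byCapability = new HashMap<Integer, Req>();
      byName.put(resourceName, byCapability);
    }
    Req req = byCapability.get(memory);
    if (req == null) {
      req = new Req();
      byCapability.put(memory, req);
    }
    req.numContainers++;  // one more container wanted for this triple
  }

  public static void main(String[] args) {
    AskTableSketch t = new AskTableSketch();
    t.add(20, "host1", 1024);  // data-local ask
    t.add(20, "*", 1024);      // the mandatory off-switch ask
    t.add(20, "host1", 1024);  // a second attempt on the same host: count -> 2
    System.out.println(t.table);
  }
}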
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DataStatistics.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DataStatistics.java
new file mode 100644
index 0000000..cfaffaf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DataStatistics.java
@@ -0,0 +1,78 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+public class DataStatistics {
+ private int count = 0;
+ private double sum = 0;
+ private double sumSquares = 0;
+
+ public DataStatistics() {
+ }
+
+ public DataStatistics(double initNum) {
+ this.count = 1;
+ this.sum = initNum;
+ this.sumSquares = initNum * initNum;
+ }
+
+ public synchronized void add(double newNum) {
+ this.count++;
+ this.sum += newNum;
+ this.sumSquares += newNum * newNum;
+ }
+
+ public synchronized void updateStatistics(double old, double update) {
+ this.sum += update - old;
+ this.sumSquares += (update * update) - (old * old);
+ }
+
+ public synchronized double mean() {
+ return count == 0 ? 0.0 : sum/count;
+ }
+
+ public synchronized double var() {
+ // E(X^2) - E(X)^2
+ if (count <= 1) {
+ return 0.0;
+ }
+ double mean = mean();
+ return Math.max((sumSquares/count) - mean * mean, 0.0d);
+ }
+
+ public synchronized double std() {
+ return Math.sqrt(this.var());
+ }
+
+ public synchronized double outlier(float sigma) {
+ if (count != 0) {
+ return mean() + std() * sigma;
+ }
+
+ return 0.0;
+ }
+
+ public synchronized double count() {
+ return count;
+ }
+
+ public String toString() {
+ return "DataStatistics: count is " + count + ", sum is " + sum +
+ ", sumSquares is " + sumSquares + " mean is " + mean() + " std() is " + std();
+ }
+}
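
As a worked example of the formulas above -- var() computes E(X^2) - E(X)^2, and outlier(sigma) is mean + sigma * std -- here is a hypothetical usage with three task durations. DataStatisticsDemo is an invented driver class and assumes DataStatistics is on the classpath.

import org.apache.hadoop.mapreduce.v2.app.speculate.DataStatistics;

public class DataStatisticsDemo {
  public static void main(String[] args) {
    DataStatistics stats = new DataStatistics();
    stats.add(1000);  // three task durations, in milliseconds
    stats.add(1200);
    stats.add(1400);
    // mean = 1200
    // var  = (1000^2 + 1200^2 + 1400^2)/3 - 1200^2 = 26666.67, std = 163.3
    // outlier(1.0f) = 1200 + 1.0 * 163.3 = 1363.3: tasks projected to run
    // past this threshold become speculation candidates
    System.out.println(stats);
    System.out.println("threshold = " + stats.outlier(1.0f));
  }
}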
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
new file mode 100644
index 0000000..a51a3e7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
@@ -0,0 +1,512 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+
+public class DefaultSpeculator extends AbstractService implements
+ Speculator {
+
+ private static final long ON_SCHEDULE = Long.MIN_VALUE;
+ private static final long ALREADY_SPECULATING = Long.MIN_VALUE + 1;
+ private static final long TOO_NEW = Long.MIN_VALUE + 2;
+ private static final long PROGRESS_IS_GOOD = Long.MIN_VALUE + 3;
+ private static final long NOT_RUNNING = Long.MIN_VALUE + 4;
+ private static final long TOO_LATE_TO_SPECULATE = Long.MIN_VALUE + 5;
+
+ private static final long SOONEST_RETRY_AFTER_NO_SPECULATE = 1000L * 1L;
+ private static final long SOONEST_RETRY_AFTER_SPECULATE = 1000L * 15L;
+
+ private static final double PROPORTION_RUNNING_TASKS_SPECULATABLE = 0.1;
+ private static final double PROPORTION_TOTAL_TASKS_SPECULATABLE = 0.01;
+ private static final int MINIMUM_ALLOWED_SPECULATIVE_TASKS = 10;
+
+ private static final Log LOG = LogFactory.getLog(DefaultSpeculator.class);
+
+ private final ConcurrentMap<TaskId, Boolean> runningTasks
+ = new ConcurrentHashMap<TaskId, Boolean>();
+
+ private final Map<Task, AtomicBoolean> pendingSpeculations
+ = new ConcurrentHashMap<Task, AtomicBoolean>();
+
+ // These are the current needs, not the initial needs. For each job, these
+ // record the number of attempts that exist and that are actively
+ // waiting for a container [as opposed to running or finished]
+ private final ConcurrentMap<JobId, AtomicInteger> mapContainerNeeds
+ = new ConcurrentHashMap<JobId, AtomicInteger>();
+ private final ConcurrentMap<JobId, AtomicInteger> reduceContainerNeeds
+ = new ConcurrentHashMap<JobId, AtomicInteger>();
+
+ private final Set<TaskId> mayHaveSpeculated = new HashSet<TaskId>();
+
+ private final Configuration conf;
+ private AppContext context;
+ private Thread speculationBackgroundThread = null;
+ private BlockingQueue<SpeculatorEvent> eventQueue
+ = new LinkedBlockingQueue<SpeculatorEvent>();
+ private TaskRuntimeEstimator estimator;
+
+ private BlockingQueue<Object> scanControl = new LinkedBlockingQueue<Object>();
+
+ private final Clock clock;
+
+ private final EventHandler<TaskEvent> eventHandler;
+
+ public DefaultSpeculator(Configuration conf, AppContext context) {
+ this(conf, context, context.getClock());
+ }
+
+ public DefaultSpeculator(Configuration conf, AppContext context, Clock clock) {
+ this(conf, context, getEstimator(conf, context), clock);
+ }
+
+ static private TaskRuntimeEstimator getEstimator
+ (Configuration conf, AppContext context) {
+ TaskRuntimeEstimator estimator;
+
+ try {
+ // "yarn.mapreduce.job.task.runtime.estimator.class"
+ Class<? extends TaskRuntimeEstimator> estimatorClass
+ = conf.getClass(AMConstants.TASK_RUNTIME_ESTIMATOR_CLASS,
+ LegacyTaskRuntimeEstimator.class,
+ TaskRuntimeEstimator.class);
+
+ Constructor<? extends TaskRuntimeEstimator> estimatorConstructor
+ = estimatorClass.getConstructor();
+
+ estimator = estimatorConstructor.newInstance();
+
+ estimator.contextualize(conf, context);
+ } catch (InstantiationException ex) {
+ LOG.error("Can't make a speculation runtime extimator" + ex);
+ throw new YarnException(ex);
+ } catch (IllegalAccessException ex) {
+ LOG.error("Can't make a speculation runtime extimator" + ex);
+ throw new YarnException(ex);
+ } catch (InvocationTargetException ex) {
+ LOG.error("Can't make a speculation runtime extimator" + ex);
+ throw new YarnException(ex);
+ } catch (NoSuchMethodException ex) {
+ LOG.error("Can't make a speculation runtime extimator" + ex);
+ throw new YarnException(ex);
+ }
+
+ return estimator;
+ }
+
+ // This constructor is designed to be called by other constructors.
+ // However, it's public because we do use it in the test cases.
+ // Normally we figure out our own estimator.
+ public DefaultSpeculator
+ (Configuration conf, AppContext context,
+ TaskRuntimeEstimator estimator, Clock clock) {
+ super(DefaultSpeculator.class.getName());
+
+ this.conf = conf;
+ this.context = context;
+ this.estimator = estimator;
+ this.clock = clock;
+ this.eventHandler = context.getEventHandler();
+ }
+
+/* ************************************************************* */
+
+ // This is the task-mongering that creates the two new threads -- one for
+ // processing events from the event queue and one for periodically
+ // looking for speculation opportunities
+
+ @Override
+ public void start() {
+ Runnable speculationBackgroundCore
+ = new Runnable() {
+ @Override
+ public void run() {
+ while (!Thread.currentThread().isInterrupted()) {
+ long backgroundRunStartTime = clock.getTime();
+ try {
+ int speculations = computeSpeculations();
+ long minimumRecomp
+ = speculations > 0 ? SOONEST_RETRY_AFTER_SPECULATE
+ : SOONEST_RETRY_AFTER_NO_SPECULATE;
+
+ long wait = Math.max(minimumRecomp,
+ clock.getTime() - backgroundRunStartTime);
+
+ if (speculations > 0) {
+ LOG.info("We launched " + speculations
+ + " speculations. Sleeping " + wait + " milliseconds.");
+ }
+
+ Object pollResult
+ = scanControl.poll(wait, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ LOG.error("Background thread returning, interrupted : " + e);
+ e.printStackTrace(System.out);
+ return;
+ }
+ }
+ }
+ };
+ speculationBackgroundThread = new Thread
+ (speculationBackgroundCore, "DefaultSpeculator background processing");
+ speculationBackgroundThread.start();
+
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ // this could be called before background thread is established
+ if (speculationBackgroundThread != null) {
+ speculationBackgroundThread.interrupt();
+ }
+ super.stop();
+ }
+
+ @Override
+ public void handleAttempt(TaskAttemptStatus status) {
+ long timestamp = clock.getTime();
+ statusUpdate(status, timestamp);
+ }
+
+ // This section is not part of the Speculator interface; it's used only for
+ // testing
+ public boolean eventQueueEmpty() {
+ return eventQueue.isEmpty();
+ }
+
+ // This interface is intended to be used only for test cases.
+ public void scanForSpeculations() {
+ LOG.info("We got asked to run a debug speculation scan.");
+ // debug
+ System.out.println("We got asked to run a debug speculation scan.");
+ System.out.println("There are " + scanControl.size()
+ + " events stacked already.");
+ scanControl.add(new Object());
+ Thread.yield();
+ }
+
+
+/* ************************************************************* */
+
+ // This section contains the code that gets run for a SpeculatorEvent
+
+ private AtomicInteger containerNeed(TaskId taskID) {
+ JobId jobID = taskID.getJobId();
+ TaskType taskType = taskID.getTaskType();
+
+ ConcurrentMap<JobId, AtomicInteger> relevantMap
+ = taskType == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;
+
+ AtomicInteger result = relevantMap.get(jobID);
+
+ if (result == null) {
+ relevantMap.putIfAbsent(jobID, new AtomicInteger(0));
+ result = relevantMap.get(jobID);
+ }
+
+ return result;
+ }
+
+ private synchronized void processSpeculatorEvent(SpeculatorEvent event) {
+ switch (event.getType()) {
+ case ATTEMPT_STATUS_UPDATE:
+ statusUpdate(event.getReportedStatus(), event.getTimestamp());
+ break;
+
+ case TASK_CONTAINER_NEED_UPDATE:
+ {
+ AtomicInteger need = containerNeed(event.getTaskID());
+ need.addAndGet(event.containersNeededChange());
+ break;
+ }
+
+ case ATTEMPT_START:
+ {
+ LOG.info("ATTEMPT_START " + event.getTaskID());
+ estimator.enrollAttempt
+ (event.getReportedStatus(), event.getTimestamp());
+ break;
+ }
+
+ case JOB_CREATE:
+ {
+ LOG.info("JOB_CREATE " + event.getJobID());
+ estimator.contextualize(getConfig(), context);
+ break;
+ }
+ }
+ }
+
+ /**
+ * Absorbs one TaskAttemptStatus
+ *
+ * @param reportedStatus the status report that we got from a task attempt
+ * that we want to fold into the speculation data for this job
+ * @param timestamp the time this status corresponds to. This matters
+ * because statuses contain progress.
+ */
+ protected void statusUpdate(TaskAttemptStatus reportedStatus, long timestamp) {
+
+ String stateString = reportedStatus.taskState.toString();
+
+ TaskAttemptId attemptID = reportedStatus.id;
+ TaskId taskID = attemptID.getTaskId();
+ Job job = context.getJob(taskID.getJobId());
+
+ if (job == null) {
+ return;
+ }
+
+ Task task = job.getTask(taskID);
+
+ if (task == null) {
+ return;
+ }
+
+ estimator.updateAttempt(reportedStatus, timestamp);
+
+ // If the task is already known to be speculation-bait, don't do anything
+ if (pendingSpeculations.get(task) != null) {
+ if (pendingSpeculations.get(task).get()) {
+ return;
+ }
+ }
+
+ if (stateString.equals(TaskAttemptState.RUNNING.name())) {
+ runningTasks.putIfAbsent(taskID, Boolean.TRUE);
+ } else {
+ runningTasks.remove(taskID, Boolean.TRUE);
+ }
+ }
+
+/* ************************************************************* */
+
+// This is the code section that runs periodically and adds speculations for
+// those jobs that need them.
+
+
+ // This can return a few magic values for tasks that shouldn't speculate:
+ // returns ON_SCHEDULE if thresholdRuntime(taskID) says that we should not
+ // consider speculating this task
+ // returns ALREADY_SPECULATING if that is true. This has priority.
+ // returns TOO_NEW if our companion task hasn't gotten any information
+ // returns PROGRESS_IS_GOOD if the task is sailing through
+ // returns NOT_RUNNING if the task is not running
+ // returns TOO_LATE_TO_SPECULATE if a fresh attempt couldn't finish sooner
+ //
+ // All of these values are negative. Any value that should be allowed to
+ // speculate is 0 or positive.
+ private long speculationValue(TaskId taskID, long now) {
+ Job job = context.getJob(taskID.getJobId());
+ Task task = job.getTask(taskID);
+ Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
+ long acceptableRuntime = Long.MIN_VALUE;
+ long result = Long.MIN_VALUE;
+
+ if (!mayHaveSpeculated.contains(taskID)) {
+ acceptableRuntime = estimator.thresholdRuntime(taskID);
+ if (acceptableRuntime == Long.MAX_VALUE) {
+ return ON_SCHEDULE;
+ }
+ }
+
+ TaskAttemptId runningTaskAttemptID = null;
+
+ int numberRunningAttempts = 0;
+
+ for (TaskAttempt taskAttempt : attempts.values()) {
+ if (taskAttempt.getState() == TaskAttemptState.RUNNING
+ || taskAttempt.getState() == TaskAttemptState.ASSIGNED) {
+ if (++numberRunningAttempts > 1) {
+ return ALREADY_SPECULATING;
+ }
+ runningTaskAttemptID = taskAttempt.getID();
+
+ long estimatedRunTime = estimator.estimatedRuntime(runningTaskAttemptID);
+
+ long taskAttemptStartTime
+ = estimator.attemptEnrolledTime(runningTaskAttemptID);
+ if (taskAttemptStartTime > now) {
+ // This background process ran before we could process the task
+ // attempt status change that chronicles the attempt start
+ return TOO_NEW;
+ }
+
+ long estimatedEndTime = estimatedRunTime + taskAttemptStartTime;
+
+ long estimatedReplacementEndTime
+ = now + estimator.estimatedNewAttemptRuntime(taskID);
+
+ if (estimatedEndTime < now) {
+ return PROGRESS_IS_GOOD;
+ }
+
+ if (estimatedReplacementEndTime >= estimatedEndTime) {
+ return TOO_LATE_TO_SPECULATE;
+ }
+
+ result = estimatedEndTime - estimatedReplacementEndTime;
+ }
+ }
+
+ // If we are here, there's at most one running task attempt.
+ if (numberRunningAttempts == 0) {
+ return NOT_RUNNING;
+ }
+
+
+
+ if (acceptableRuntime == Long.MIN_VALUE) {
+ acceptableRuntime = estimator.thresholdRuntime(taskID);
+ if (acceptableRuntime == Long.MAX_VALUE) {
+ return ON_SCHEDULE;
+ }
+ }
+
+ return result;
+ }
+
+ //Add a speculative attempt to the given task.
+ protected void addSpeculativeAttempt(TaskId taskID) {
+ LOG.info
+ ("DefaultSpeculator.addSpeculativeAttempt -- we are speculating " + taskID);
+ eventHandler.handle(new TaskEvent(taskID, TaskEventType.T_ADD_SPEC_ATTEMPT));
+ mayHaveSpeculated.add(taskID);
+ }
+
+ @Override
+ public void handle(SpeculatorEvent event) {
+ processSpeculatorEvent(event);
+ }
+
+
+ private int maybeScheduleAMapSpeculation() {
+ return maybeScheduleASpeculation(TaskType.MAP);
+ }
+
+ private int maybeScheduleAReduceSpeculation() {
+ return maybeScheduleASpeculation(TaskType.REDUCE);
+ }
+
+ private int maybeScheduleASpeculation(TaskType type) {
+ int successes = 0;
+
+ long now = clock.getTime();
+
+ ConcurrentMap<JobId, AtomicInteger> containerNeeds
+ = type == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;
+
+ for (ConcurrentMap.Entry<JobId, AtomicInteger> jobEntry : containerNeeds.entrySet()) {
+ // This race condition is okay. If we skip a speculation attempt that we
+ // should have tried because the event that lowers the number of
+ // containers needed to zero hasn't come through yet, it will next time.
+ // Also, if we miss the fact that the number of containers needed was
+ // zero but increased due to a failure it's not too bad to launch one
+ // container prematurely.
+ if (jobEntry.getValue().get() > 0) {
+ continue;
+ }
+
+ int numberSpeculationsAlready = 0;
+ int numberRunningTasks = 0;
+
+ // loop through the tasks of the kind
+ Job job = context.getJob(jobEntry.getKey());
+
+ Map<TaskId, Task> tasks = job.getTasks(type);
+
+ int numberAllowedSpeculativeTasks
+ = (int) Math.max(MINIMUM_ALLOWED_SPECULATIVE_TASKS,
+ PROPORTION_TOTAL_TASKS_SPECULATABLE * tasks.size());
+
+ TaskId bestTaskID = null;
+ long bestSpeculationValue = -1L;
+
+ // this loop is potentially pricey.
+ // TODO track the tasks that are potentially worth looking at
+ for (Map.Entry<TaskId, Task> taskEntry : tasks.entrySet()) {
+ long mySpeculationValue = speculationValue(taskEntry.getKey(), now);
+
+ if (mySpeculationValue == ALREADY_SPECULATING) {
+ ++numberSpeculationsAlready;
+ }
+
+ if (mySpeculationValue != NOT_RUNNING) {
+ ++numberRunningTasks;
+ }
+
+ if (mySpeculationValue > bestSpeculationValue) {
+ bestTaskID = taskEntry.getKey();
+ bestSpeculationValue = mySpeculationValue;
+ }
+ }
+ numberAllowedSpeculativeTasks
+ = (int) Math.max(numberAllowedSpeculativeTasks,
+ PROPORTION_RUNNING_TASKS_SPECULATABLE * numberRunningTasks);
+
+ // If we found a speculation target, fire it off
+ if (bestTaskID != null
+ && numberAllowedSpeculativeTasks > numberSpeculationsAlready) {
+ addSpeculativeAttempt(bestTaskID);
+ ++successes;
+ }
+ }
+
+ return successes;
+ }
+
+ private int computeSpeculations() {
+ // We'll try to issue one map and one reduce speculation per job per run
+ return maybeScheduleAMapSpeculation() + maybeScheduleAReduceSpeculation();
+ }
+}
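
The heart of speculationValue() is a comparison of two projected end times; when the difference is positive, it is the expected saving in milliseconds, and the largest saving in the job wins the one speculation per scan. A small self-contained sketch of that arithmetic, with invented numbers and class name:

public class SpeculationValueSketch {
  public static void main(String[] args) {
    long now = 100000L;                // current clock time, ms
    long attemptStart = 40000L;        // when the running attempt enrolled
    long estimatedRunTime = 120000L;   // estimator's projected total runtime
    long newAttemptRunTime = 45000L;   // estimator's mean runtime for a fresh attempt

    long estimatedEndTime = attemptStart + estimatedRunTime;     // 160000
    long estimatedReplacementEndTime = now + newAttemptRunTime;  // 145000

    if (estimatedEndTime < now) {
      System.out.println("PROGRESS_IS_GOOD: the attempt is about to finish");
    } else if (estimatedReplacementEndTime >= estimatedEndTime) {
      System.out.println("TOO_LATE_TO_SPECULATE: a new attempt would not help");
    } else {
      // positive: expected saving in ms; the best such task gets speculated
      System.out.println("speculation value = "
          + (estimatedEndTime - estimatedReplacementEndTime));   // 15000
    }
  }
}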
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/ExponentiallySmoothedTaskRuntimeEstimator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/ExponentiallySmoothedTaskRuntimeEstimator.java
new file mode 100644
index 0000000..ff50bc2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/ExponentiallySmoothedTaskRuntimeEstimator.java
@@ -0,0 +1,195 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+
+/**
+ * This estimator exponentially smooths the rate of progress versus wallclock
+ * time. Conceivably we could write an estimator that smooths time per
+ * unit progress, and get different results.
+ */
+public class ExponentiallySmoothedTaskRuntimeEstimator extends StartEndTimesBase {
+
+ private final ConcurrentMap<TaskAttemptId, AtomicReference<EstimateVector>> estimates
+ = new ConcurrentHashMap<TaskAttemptId, AtomicReference<EstimateVector>>();
+
+ private SmoothedValue smoothedValue;
+
+ private long lambda;
+
+ public enum SmoothedValue {
+ RATE, TIME_PER_UNIT_PROGRESS
+ }
+
+ ExponentiallySmoothedTaskRuntimeEstimator
+ (long lambda, SmoothedValue smoothedValue) {
+ super();
+ this.smoothedValue = smoothedValue;
+ this.lambda = lambda;
+ }
+
+ public ExponentiallySmoothedTaskRuntimeEstimator() {
+ super();
+ }
+
+ // immutable
+ private class EstimateVector {
+ final double value;
+ final float basedOnProgress;
+ final long atTime;
+
+ EstimateVector(double value, float basedOnProgress, long atTime) {
+ this.value = value;
+ this.basedOnProgress = basedOnProgress;
+ this.atTime = atTime;
+ }
+
+ EstimateVector incorporate(float newProgress, long newAtTime) {
+ if (newAtTime <= atTime || newProgress < basedOnProgress) {
+ return this;
+ }
+
+ // Weight the old value by exp(-dt/lambda): it decays toward zero as the
+ // gap between readings grows. A negative value marks "no data yet".
+ double oldWeighting
+ = value < 0.0
+ ? 0.0 : Math.exp(-((double) (newAtTime - atTime)) / lambda);
+
+ double newRead = (newProgress - basedOnProgress) / (newAtTime - atTime);
+
+ if (smoothedValue == SmoothedValue.TIME_PER_UNIT_PROGRESS) {
+ newRead = 1.0 / newRead;
+ }
+
+ return new EstimateVector
+ (value * oldWeighting + newRead * (1.0 - oldWeighting),
+ newProgress, newAtTime);
+ }
+ }
+
+ private void incorporateReading
+ (TaskAttemptId attemptID, float newProgress, long newTime) {
+ //TODO: Refactor this method, it seems more complicated than necessary.
+ AtomicReference<EstimateVector> vectorRef = estimates.get(attemptID);
+
+ if (vectorRef == null) {
+ estimates.putIfAbsent(attemptID, new AtomicReference<EstimateVector>(null));
+ incorporateReading(attemptID, newProgress, newTime);
+ return;
+ }
+
+ EstimateVector oldVector = vectorRef.get();
+
+ if (oldVector == null) {
+ if (vectorRef.compareAndSet(null,
+ new EstimateVector(-1.0, 0.0F, Long.MIN_VALUE))) {
+ return;
+ }
+
+ incorporateReading(attemptID, newProgress, newTime);
+ return;
+ }
+
+ while (!vectorRef.compareAndSet
+ (oldVector, oldVector.incorporate(newProgress, newTime))) {
+ oldVector = vectorRef.get();
+ }
+ }
+
+ private EstimateVector getEstimateVector(TaskAttemptId attemptID) {
+ AtomicReference<EstimateVector> vectorRef = estimates.get(attemptID);
+
+ if (vectorRef == null) {
+ return null;
+ }
+
+ return vectorRef.get();
+ }
+
+ private static final long DEFAULT_EXPONENTIAL_SMOOTHING_LAMBDA_MILLISECONDS
+ = 1000L * 60;
+
+ @Override
+ public void contextualize(Configuration conf, AppContext context) {
+ super.contextualize(conf, context);
+
+ lambda
+ = conf.getLong(AMConstants.EXPONENTIAL_SMOOTHING_LAMBDA_MILLISECONDS,
+ DEFAULT_EXPONENTIAL_SMOOTHING_LAMBDA_MILLISECONDS);
+ smoothedValue
+ = conf.getBoolean(AMConstants.EXPONENTIAL_SMOOTHING_SMOOTH_RATE, true)
+ ? SmoothedValue.RATE : SmoothedValue.TIME_PER_UNIT_PROGRESS;
+ }
+
+ @Override
+ public long estimatedRuntime(TaskAttemptId id) {
+ Long startTime = startTimes.get(id);
+
+ if (startTime == null) {
+ return -1L;
+ }
+
+ EstimateVector vector = getEstimateVector(id);
+
+ if (vector == null) {
+ return -1L;
+ }
+
+ long sunkTime = vector.atTime - startTime;
+
+ double value = vector.value;
+ float progress = vector.basedOnProgress;
+
+ if (value == 0) {
+ return -1L;
+ }
+
+ double rate = smoothedValue == SmoothedValue.RATE ? value : 1.0 / value;
+
+ if (rate == 0.0) {
+ return -1L;
+ }
+
+ double remainingTime = (1.0 - progress) / rate;
+
+ return sunkTime + (long)remainingTime;
+ }
+
+ @Override
+ public long runtimeEstimateVariance(TaskAttemptId id) {
+ return -1L;
+ }
+
+ @Override
+ public void updateAttempt(TaskAttemptStatus status, long timestamp) {
+ super.updateAttempt(status, timestamp);
+ TaskAttemptId attemptID = status.id;
+
+ float progress = status.progress;
+
+ incorporateReading(attemptID, progress, timestamp);
+ }
+}
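
The smoothing in EstimateVector.incorporate can be read in isolation: the old value is weighted by exp(-dt/lambda), so a reading that arrives long after the previous one mostly replaces it. A standalone sketch under that assumption, with invented names (SmoothedRateSketch, smooth) and the default one-minute lambda:

public class SmoothedRateSketch {
  // Blend an old progress rate with a new reading; oldRate < 0 means "no data".
  static double smooth(double oldRate, long oldTime, float oldProgress,
                       long newTime, float newProgress, long lambda) {
    double newRead = (newProgress - oldProgress) / (double) (newTime - oldTime);
    if (oldRate < 0.0) {
      return newRead;  // first reading: nothing to smooth against
    }
    double oldWeighting = Math.exp(-((double) (newTime - oldTime)) / lambda);
    return oldRate * oldWeighting + newRead * (1.0 - oldWeighting);
  }

  public static void main(String[] args) {
    long lambda = 60000L;  // the default smoothing window: one minute
    double rate = smooth(-1.0, 0L, 0.0f, 10000L, 0.10f, lambda);
    rate = smooth(rate, 10000L, 0.10f, 70000L, 0.40f, lambda);
    // 60s gap: old rate weighted exp(-1) = 0.368, new reading weighted 0.632
    System.out.println("smoothed progress per ms = " + rate);
  }
}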
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/LegacyTaskRuntimeEstimator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/LegacyTaskRuntimeEstimator.java
new file mode 100644
index 0000000..aed71e8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/LegacyTaskRuntimeEstimator.java
@@ -0,0 +1,150 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+
+
+
+
+public class LegacyTaskRuntimeEstimator extends StartEndTimesBase {
+
+ private final Map<TaskAttempt, AtomicLong> attemptRuntimeEstimates
+ = new ConcurrentHashMap<TaskAttempt, AtomicLong>();
+ private final ConcurrentHashMap<TaskAttempt, AtomicLong> attemptRuntimeEstimateVariances
+ = new ConcurrentHashMap<TaskAttempt, AtomicLong>();
+
+ @Override
+ public void updateAttempt(TaskAttemptStatus status, long timestamp) {
+ super.updateAttempt(status, timestamp);
+
+
+ TaskAttemptId attemptID = status.id;
+ TaskId taskID = attemptID.getTaskId();
+ JobId jobID = taskID.getJobId();
+ Job job = context.getJob(jobID);
+
+ if (job == null) {
+ return;
+ }
+
+ Task task = job.getTask(taskID);
+
+ if (task == null) {
+ return;
+ }
+
+ TaskAttempt taskAttempt = task.getAttempt(attemptID);
+
+ if (taskAttempt == null) {
+ return;
+ }
+
+ Long boxedStart = startTimes.get(attemptID);
+ long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
+
+ // We need to do two things.
+ // 1: If this is a completion, we accumulate statistics in the superclass
+ // 2: If this is not a completion, we learn more about it.
+
+ // This is not a completion, but we're cooking.
+ //
+ if (taskAttempt.getState() == TaskAttemptState.RUNNING) {
+ // See if this task is already in the registry
+ AtomicLong estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
+ AtomicLong estimateVarianceContainer
+ = attemptRuntimeEstimateVariances.get(taskAttempt);
+
+ if (estimateContainer == null) {
+ if (attemptRuntimeEstimates.get(taskAttempt) == null) {
+ attemptRuntimeEstimates.put(taskAttempt, new AtomicLong());
+
+ estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
+ }
+ }
+
+ if (estimateVarianceContainer == null) {
+ attemptRuntimeEstimateVariances.putIfAbsent(taskAttempt, new AtomicLong());
+ estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt);
+ }
+
+
+ long estimate = -1;
+ long varianceEstimate = -1;
+
+ // This code assumes that we'll never consider starting a third
+ // speculative task attempt if two are already running for this task
+ if (start > 0 && timestamp > start) {
+ estimate = (long) ((timestamp - start) / Math.max(0.0001, status.progress));
+ varianceEstimate = (long) (estimate * status.progress / 10);
+ }
+ if (estimateContainer != null) {
+ estimateContainer.set(estimate);
+ }
+ if (estimateVarianceContainer != null) {
+ estimateVarianceContainer.set(varianceEstimate);
+ }
+ }
+ }
+
+ private long storedPerAttemptValue
+ (Map<TaskAttempt, AtomicLong> data, TaskAttemptId attemptID) {
+ TaskId taskID = attemptID.getTaskId();
+ JobId jobID = taskID.getJobId();
+ Job job = context.getJob(jobID);
+
+ Task task = job.getTask(taskID);
+
+ if (task == null) {
+ return -1L;
+ }
+
+ TaskAttempt taskAttempt = task.getAttempt(attemptID);
+
+ if (taskAttempt == null) {
+ return -1L;
+ }
+
+ AtomicLong estimate = data.get(taskAttempt);
+
+ return estimate == null ? -1L : estimate.get();
+
+ }
+
+ @Override
+ public long estimatedRuntime(TaskAttemptId attemptID) {
+ return storedPerAttemptValue(attemptRuntimeEstimates, attemptID);
+ }
+
+ @Override
+ public long runtimeEstimateVariance(TaskAttemptId attemptID) {
+ return storedPerAttemptValue(attemptRuntimeEstimateVariances, attemptID);
+ }
+}
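
The legacy projection above is simply elapsed time divided by reported progress, floored at 0.0001 to avoid dividing by zero. A worked example with invented values and class name:

public class LegacyEstimateSketch {
  public static void main(String[] args) {
    long start = 50000L;       // attempt enrollment time, ms
    long timestamp = 110000L;  // time of this status update
    float progress = 0.25f;    // fraction of the task reported done

    long estimate = (long) ((timestamp - start) / Math.max(0.0001, progress));
    long varianceEstimate = (long) (estimate * progress / 10);

    System.out.println("projected total runtime = " + estimate);   // 240000 ms
    System.out.println("variance estimate = " + varianceEstimate); // 6000
  }
}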
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/NullTaskRuntimesEngine.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/NullTaskRuntimesEngine.java
new file mode 100644
index 0000000..7211ff4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/NullTaskRuntimesEngine.java
@@ -0,0 +1,72 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+
+
+/*
+ * This class is provided solely as an example of the values that mean
+ * that nothing needs to be computed. It's not currently used.
+ */
+public class NullTaskRuntimesEngine implements TaskRuntimeEstimator {
+ @Override
+ public void enrollAttempt(TaskAttemptStatus status, long timestamp) {
+ // no code
+ }
+
+ @Override
+ public long attemptEnrolledTime(TaskAttemptId attemptID) {
+ return Long.MAX_VALUE;
+ }
+
+ @Override
+ public void updateAttempt(TaskAttemptStatus status, long timestamp) {
+ // no code
+ }
+
+ @Override
+ public void contextualize(Configuration conf, AppContext context) {
+ // no code
+ }
+
+ @Override
+ public long thresholdRuntime(TaskId id) {
+ return Long.MAX_VALUE;
+ }
+
+ @Override
+ public long estimatedRuntime(TaskAttemptId id) {
+ return -1L;
+ }
+ @Override
+ public long estimatedNewAttemptRuntime(TaskId id) {
+ return -1L;
+ }
+
+ @Override
+ public long runtimeEstimateVariance(TaskAttemptId id) {
+ return -1L;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/Speculator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/Speculator.java
new file mode 100644
index 0000000..14fcbe8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/Speculator.java
@@ -0,0 +1,45 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.yarn.event.EventHandler;
+
+/**
+ * Speculator component. Task attempts' status updates are sent to this
+ * component. A concrete implementation runs the speculative algorithm and
+ * sends TaskEventType.T_ADD_SPEC_ATTEMPT events.
+ *
+ * An implementation also has to arrange for the jobs to be scanned from
+ * time to time, to launch the speculations.
+ */
+public interface Speculator
+ extends EventHandler<SpeculatorEvent> {
+
+ enum EventType {
+ ATTEMPT_STATUS_UPDATE,
+ ATTEMPT_START,
+ TASK_CONTAINER_NEED_UPDATE,
+ JOB_CREATE
+ }
+
+ // This will be implemented if we go to a model where the events are
+ // processed within the TaskAttempts' state transitions' code.
+ public void handleAttempt(TaskAttemptStatus status);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/SpeculatorEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/SpeculatorEvent.java
new file mode 100644
index 0000000..d68bd25
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/SpeculatorEvent.java
@@ -0,0 +1,86 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+
+public class SpeculatorEvent extends AbstractEvent<Speculator.EventType> {
+
+ // valid for ATTEMPT_STATUS_UPDATE
+ private TaskAttemptStatus reportedStatus;
+
+ // valid for TASK_CONTAINER_NEED_UPDATE
+ private TaskId taskID;
+ private int containersNeededChange;
+
+ // valid for JOB_CREATE
+ private JobId jobID;
+
+ public SpeculatorEvent(JobId jobID, long timestamp) {
+ super(Speculator.EventType.JOB_CREATE, timestamp);
+ this.jobID = jobID;
+ }
+
+ public SpeculatorEvent(TaskAttemptStatus reportedStatus, long timestamp) {
+ super(Speculator.EventType.ATTEMPT_STATUS_UPDATE, timestamp);
+ this.reportedStatus = reportedStatus;
+ }
+
+ public SpeculatorEvent(TaskAttemptId attemptID, boolean flag, long timestamp) {
+ super(Speculator.EventType.ATTEMPT_START, timestamp);
+ this.reportedStatus = new TaskAttemptStatus();
+ this.reportedStatus.id = attemptID;
+ this.taskID = attemptID.getTaskId();
+ }
+
+ /*
+ * This c'tor creates a TASK_CONTAINER_NEED_UPDATE event.
+ * We send a +1 event when a task enters a state where it wants a container,
+ * and a -1 event when it either gets one or withdraws the request.
+ * The per-job sum of all these events is the number of containers requested
+ * but not granted. The intent is that we only do speculations when the
+ * speculation wouldn't compete for containers with tasks which need
+ * to be run.
+ */
+ public SpeculatorEvent(TaskId taskID, int containersNeededChange) {
+ super(Speculator.EventType.TASK_CONTAINER_NEED_UPDATE);
+ this.taskID = taskID;
+ this.containersNeededChange = containersNeededChange;
+ }
+
+ public TaskAttemptStatus getReportedStatus() {
+ return reportedStatus;
+ }
+
+ public int containersNeededChange() {
+ return containersNeededChange;
+ }
+
+ public TaskId getTaskID() {
+ return taskID;
+ }
+
+ public JobId getJobID() {
+ return jobID;
+ }
+}
\ No newline at end of file
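
The +1/-1 accounting described in the c'tor comment reduces to a per-job counter; DefaultSpeculator only considers a job's tasks when the counter is back at zero. A tiny sketch of that bookkeeping (ContainerNeedSketch is an invented name):

import java.util.concurrent.atomic.AtomicInteger;

public class ContainerNeedSketch {
  public static void main(String[] args) {
    AtomicInteger mapNeed = new AtomicInteger(0);
    mapNeed.addAndGet(+1);  // a map task starts waiting for a container
    mapNeed.addAndGet(+1);  // a second one
    mapNeed.addAndGet(-1);  // the first task is granted its container
    // Still positive: speculating now would compete with tasks that must run.
    System.out.println("outstanding map-container need = " + mapNeed.get());
  }
}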
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/StartEndTimesBase.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/StartEndTimesBase.java
new file mode 100644
index 0000000..a716047
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/StartEndTimesBase.java
@@ -0,0 +1,213 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+
+abstract class StartEndTimesBase implements TaskRuntimeEstimator {
+ static final float MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE
+ = 0.05F;
+ static final int MINIMUM_COMPLETE_NUMBER_TO_SPECULATE
+ = 1;
+
+ protected Configuration conf = null;
+ protected AppContext context = null;
+
+ protected final Map<TaskAttemptId, Long> startTimes
+ = new ConcurrentHashMap<TaskAttemptId, Long>();
+
+ // XXXX This class design assumes that the contents of AppContext.getAllJobs
+ // never changes. Is that right?
+ //
+ // This assumption comes up in several places, mostly in data structures that
+ // can grow without limit if an AppContext gets new Jobs when the old ones
+ // run out. Also, these mapper statistics blocks won't cover the Jobs
+ // we don't know about.
+ protected final Map<Job, DataStatistics> mapperStatistics
+ = new HashMap<Job, DataStatistics>();
+ protected final Map<Job, DataStatistics> reducerStatistics
+ = new HashMap<Job, DataStatistics>();
+
+
+ private final Map<Job, Float> slowTaskRelativeThresholds
+ = new HashMap<Job, Float>();
+
+ protected final Set<Task> doneTasks = new HashSet<Task>();
+
+ @Override
+ public void enrollAttempt(TaskAttemptStatus status, long timestamp) {
+ startTimes.put(status.id, timestamp);
+ }
+
+ @Override
+ public long attemptEnrolledTime(TaskAttemptId attemptID) {
+ Long result = startTimes.get(attemptID);
+
+ return result == null ? Long.MAX_VALUE : result;
+ }
+
+
+ @Override
+ public void contextualize(Configuration conf, AppContext context) {
+ this.conf = conf;
+ this.context = context;
+
+ Map<JobId, Job> allJobs = context.getAllJobs();
+
+ for (Map.Entry<JobId, Job> entry : allJobs.entrySet()) {
+ final Job job = entry.getValue();
+ mapperStatistics.put(job, new DataStatistics());
+ reducerStatistics.put(job, new DataStatistics());
+ slowTaskRelativeThresholds.put
+ (job, conf.getFloat(MRJobConfig.SPECULATIVE_SLOWTASK_THRESHOLD,1.0f));
+ }
+ }
+
+ protected DataStatistics dataStatisticsForTask(TaskId taskID) {
+ JobId jobID = taskID.getJobId();
+ Job job = context.getJob(jobID);
+
+ if (job == null) {
+ return null;
+ }
+
+ Task task = job.getTask(taskID);
+
+ if (task == null) {
+ return null;
+ }
+
+ return task.getType() == TaskType.MAP
+ ? mapperStatistics.get(job)
+ : task.getType() == TaskType.REDUCE
+ ? reducerStatistics.get(job)
+ : null;
+ }
+
+ @Override
+ public long thresholdRuntime(TaskId taskID) {
+ JobId jobID = taskID.getJobId();
+ Job job = context.getJob(jobID);
+
+ TaskType type = taskID.getTaskType();
+
+ DataStatistics statistics
+ = dataStatisticsForTask(taskID);
+
+ int completedTasksOfType
+ = type == TaskType.MAP
+ ? job.getCompletedMaps() : job.getCompletedReduces();
+
+ int totalTasksOfType
+ = type == TaskType.MAP
+ ? job.getTotalMaps() : job.getTotalReduces();
+
+ if (completedTasksOfType < MINIMUM_COMPLETE_NUMBER_TO_SPECULATE
+ || (((float)completedTasksOfType) / totalTasksOfType)
+ < MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE ) {
+ return Long.MAX_VALUE;
+ }
+
+ long result = statistics == null
+ ? Long.MAX_VALUE
+ : (long)statistics.outlier(slowTaskRelativeThresholds.get(job));
+ return result;
+ }
+
+ @Override
+ public long estimatedNewAttemptRuntime(TaskId id) {
+ DataStatistics statistics = dataStatisticsForTask(id);
+
+ if (statistics == null) {
+ return -1L;
+ }
+
+ return (long)statistics.mean();
+ }
+
+ @Override
+ public void updateAttempt(TaskAttemptStatus status, long timestamp) {
+
+ TaskAttemptId attemptID = status.id;
+ TaskId taskID = attemptID.getTaskId();
+ JobId jobID = taskID.getJobId();
+ Job job = context.getJob(jobID);
+
+ if (job == null) {
+ return;
+ }
+
+ Task task = job.getTask(taskID);
+
+ if (task == null) {
+ return;
+ }
+
+ Long boxedStart = startTimes.get(attemptID);
+ long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
+
+ TaskAttempt taskAttempt = task.getAttempt(attemptID);
+
+ if (taskAttempt.getState() == TaskAttemptState.SUCCEEDED) {
+ boolean isNew = false;
+ // is this a new success?
+ synchronized (doneTasks) {
+ if (!doneTasks.contains(task)) {
+ doneTasks.add(task);
+ isNew = true;
+ }
+ }
+
+ // It's a new completion
+ // Note that if a task completes twice [because of a previous speculation
+ // and a race, or a success followed by loss of the machine with the
+ // local data] we only count the first one.
+ if (isNew) {
+ long finish = timestamp;
+ if (start > 1L && finish > 1L && start <= finish) {
+ long duration = finish - start;
+
+ DataStatistics statistics
+ = dataStatisticsForTask(taskID);
+
+ if (statistics != null) {
+ statistics.add(duration);
+ }
+ }
+ }
+ }
+ }
+}
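
thresholdRuntime gates speculation behind two data-sufficiency checks, both of which must pass before the outlier threshold is even consulted. A sketch of that gate with invented counts (ThresholdGateSketch is an illustrative name):

public class ThresholdGateSketch {
  public static void main(String[] args) {
    int completed = 3, total = 100;
    // MINIMUM_COMPLETE_NUMBER_TO_SPECULATE = 1,
    // MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE = 0.05F
    boolean enoughData = completed >= 1
        && ((float) completed) / total >= 0.05F;
    // 3/100 = 0.03 < 0.05: thresholdRuntime returns Long.MAX_VALUE and every
    // task of this type stays ON_SCHEDULE until more completions arrive.
    System.out.println(enoughData ? "may speculate" : "ON_SCHEDULE");
  }
}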
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java
new file mode 100644
index 0000000..93e5ae3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java
@@ -0,0 +1,90 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+
+
+
+public interface TaskRuntimeEstimator {
+ public void enrollAttempt(TaskAttemptStatus reportedStatus, long timestamp);
+
+ public long attemptEnrolledTime(TaskAttemptId attemptID);
+
+ public void updateAttempt(TaskAttemptStatus reportedStatus, long timestamp);
+
+ public void contextualize(Configuration conf, AppContext context);
+
+ /**
+ *
+ * Find a maximum reasonable execution wallclock time. Includes the time
+ * already elapsed. If the projected total execution time for this task
+ * ever exceeds its reasonable execution time, we may speculate it.
+ *
+ * @param id the {@link TaskId} of the task we are asking about
+ * @return the task's maximum reasonable runtime, or MAX_VALUE if
+ * we don't have enough information to rule out any runtime,
+ * however long.
+ *
+ */
+ public long thresholdRuntime(TaskId id);
+
+ /**
+ *
+ * Estimate a task attempt's total runtime. Includes the time already
+ * elapsed.
+ *
+ * @param id the {@link TaskAttemptId} of the attempt we are asking about
+ * @return our best estimate of the attempt's runtime, or {@code -1} if
+ * we don't have enough information yet to produce an estimate.
+ *
+ */
+ public long estimatedRuntime(TaskAttemptId id);
+
+ /**
+ *
+ * Estimates how long a new attempt on this task will take if we start
+ * one now
+ *
+ * @param id the {@link TaskId} of the task we are asking about
+ * @return our best estimate of a new attempt's runtime, or {@code -1} if
+ * we don't have enough information yet to produce an estimate.
+ *
+ */
+ public long estimatedNewAttemptRuntime(TaskId id);
+
+ /**
+ *
+ * Computes the width of the error band of our estimate of the task
+ * runtime as returned by {@link #estimatedRuntime(TaskAttemptId)}
+ *
+ * @param id the {@link TaskAttemptId} of the attempt we are asking about
+ * @return our best estimate of the variance of the attempt's runtime, or
+ * {@code -1} if we don't have enough information yet to produce an estimate.
+ *
+ */
+ public long runtimeEstimateVariance(TaskAttemptId id);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskSpeculationPredicate.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskSpeculationPredicate.java
new file mode 100644
index 0000000..76ca50a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskSpeculationPredicate.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+
+
+public class TaskSpeculationPredicate {
+ boolean canSpeculate(AppContext context, TaskId taskID) {
+ // This class rejects speculating any task that already has speculations,
+ // or isn't running.
+ // Subclasses should call TaskSpeculationPredicate.canSpeculate(...) , but
+ // can be even more restrictive.
+ JobId jobID = taskID.getJobId();
+ Job job = context.getJob(jobID);
+ Task task = job.getTask(taskID);
+ return task.getAttempts().size() == 1;
+ }
+}
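As the comment says, subclasses defer to this base check and then tighten it. A hedged sketch of that pattern; the subclass name and its map-only restriction are illustrative, not part of this patch:

package org.apache.hadoop.mapreduce.v2.app.speculate;

import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;

public class MapOnlySpeculationPredicate extends TaskSpeculationPredicate {
  @Override
  boolean canSpeculate(AppContext context, TaskId taskID) {
    // Apply the base rule first: exactly one attempt in flight.
    if (!super.canSpeculate(context, taskID)) {
      return false;
    }
    // Illustrative extra restriction: only speculate map tasks.
    return context.getJob(taskID.getJobId())
        .getTask(taskID).getType() == TaskType.MAP;
  }
}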
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleaner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleaner.java
new file mode 100644
index 0000000..43b821d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleaner.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.taskclean;
+
+import org.apache.hadoop.yarn.event.EventHandler;
+
+public interface TaskCleaner extends EventHandler<TaskCleanupEvent> {
+
+ enum EventType {
+ TASK_CLEAN
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanerImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanerImpl.java
new file mode 100644
index 0000000..b18b334
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanerImpl.java
@@ -0,0 +1,108 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.taskclean;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+public class TaskCleanerImpl extends AbstractService implements TaskCleaner {
+
+ private static final Log LOG = LogFactory.getLog(TaskCleanerImpl.class);
+
+ private final AppContext context;
+ private ThreadPoolExecutor launcherPool;
+ private Thread eventHandlingThread;
+ private BlockingQueue<TaskCleanupEvent> eventQueue =
+ new LinkedBlockingQueue<TaskCleanupEvent>();
+
+ public TaskCleanerImpl(AppContext context) {
+ super("TaskCleaner");
+ this.context = context;
+ }
+
+ public void start() {
+ launcherPool = new ThreadPoolExecutor(1, 5, 1,
+ TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
+ eventHandlingThread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ TaskCleanupEvent event = null;
+ while (!Thread.currentThread().isInterrupted()) {
+ try {
+ event = eventQueue.take();
+ } catch (InterruptedException e) {
+ LOG.error("Returning, interrupted : " + e);
+ return;
+ }
+ // the events from the queue are handled in parallel using a thread pool
+ launcherPool.execute(new EventProcessor(event));
+ }
+ }
+ });
+ eventHandlingThread.start();
+ super.start();
+ }
+
+ public void stop() {
+ eventHandlingThread.interrupt();
+ launcherPool.shutdown();
+ super.stop();
+ }
+
+ private class EventProcessor implements Runnable {
+ private TaskCleanupEvent event;
+
+ EventProcessor(TaskCleanupEvent event) {
+ this.event = event;
+ }
+
+ @Override
+ public void run() {
+ LOG.info("Processing the event " + event.toString());
+ try {
+ event.getCommitter().abortTask(event.getAttemptContext());
+ } catch (Exception e) {
+ LOG.warn("Task cleanup failed for attempt " + event.getAttemptID(), e);
+ }
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(event.getAttemptID(),
+ TaskAttemptEventType.TA_CLEANUP_DONE));
+ }
+ }
+
+ @Override
+ public void handle(TaskCleanupEvent event) {
+ try {
+ eventQueue.put(event);
+ } catch (InterruptedException e) {
+ throw new YarnException(e);
+ }
+ }
+
+}
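For orientation, a hedged lifecycle sketch of the service above: events queued through handle() are drained by the dispatcher thread, each abortTask() runs on the worker pool, and TA_CLEANUP_DONE flows back through the AppContext. The helper and its parameters are placeholders for what the application master supplies:

package org.apache.hadoop.mapreduce.v2.app.taskclean;

import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;

public class TaskCleanerUsageSketch {
  static void cleanup(AppContext appContext, TaskAttemptId attemptId,
      OutputCommitter committer, TaskAttemptContext taskContext) {
    TaskCleanerImpl cleaner = new TaskCleanerImpl(appContext);
    cleaner.start();  // spawns the dispatcher thread and the worker pool
    // Queue a cleanup; abortTask() runs on the pool, then TA_CLEANUP_DONE
    // is sent to the attempt via the AppContext event handler.
    cleaner.handle(new TaskCleanupEvent(attemptId, committer, taskContext));
    // The real AM stops the service only at teardown; stopping this early
    // could interrupt the dispatcher before the queue drains.
    cleaner.stop();
  }
}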
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanupEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanupEvent.java
new file mode 100644
index 0000000..288f9fd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanupEvent.java
@@ -0,0 +1,56 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.taskclean;
+
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+/**
+ * This class encapsulates a task cleanup event.
+ *
+ */
+public class TaskCleanupEvent extends AbstractEvent<TaskCleaner.EventType> {
+
+ private final TaskAttemptId attemptID;
+ private final OutputCommitter committer;
+ private final TaskAttemptContext attemptContext;
+
+ public TaskCleanupEvent(TaskAttemptId attemptID, OutputCommitter committer,
+ TaskAttemptContext attemptContext) {
+ super(TaskCleaner.EventType.TASK_CLEAN);
+ this.attemptID = attemptID;
+ this.committer = committer;
+ this.attemptContext = attemptContext;
+ }
+
+ public TaskAttemptId getAttemptID() {
+ return attemptID;
+ }
+
+ public OutputCommitter getCommitter() {
+ return committer;
+ }
+
+ public TaskAttemptContext getAttemptContext() {
+ return attemptContext;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMParams.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMParams.java
new file mode 100644
index 0000000..7dfdefa
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMParams.java
@@ -0,0 +1,31 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+/**
+ * Params constants for the AM webapp and the history webapp.
+ */
+public interface AMParams {
+ static final String RM_WEB = "rm.web";
+ static final String APP_ID = "app.id";
+ static final String JOB_ID = "job.id";
+ static final String TASK_ID = "task.id";
+ static final String TASK_TYPE = "task.type";
+ static final String ATTEMPT_STATE = "attempt.state";
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebApp.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebApp.java
new file mode 100644
index 0000000..f74c409
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebApp.java
@@ -0,0 +1,41 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+
+import org.apache.hadoop.yarn.webapp.WebApp;
+
+/**
+ * Application master webapp
+ */
+public class AMWebApp extends WebApp implements AMParams {
+
+ @Override
+ public void setup() {
+ route("/", AppController.class);
+ route("/app", AppController.class);
+ route(pajoin("/job", JOB_ID), AppController.class, "job");
+ route(pajoin("/jobcounters", JOB_ID), AppController.class, "jobCounters");
+ route(pajoin("/tasks", JOB_ID, TASK_TYPE), AppController.class, "tasks");
+ route(pajoin("/attempts", JOB_ID, TASK_TYPE, ATTEMPT_STATE),
+ AppController.class, "attempts");
+ route(pajoin("/task", TASK_ID), AppController.class, "task");
+ }
+}
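A hedged note on how these routes resolve: pajoin() appends each param as a path segment, so requests dispatch roughly as follows (all IDs below are made up for illustration):

// GET /                            -> AppController#index()
// GET /app                         -> AppController#index()
// GET /job/job_1234567890_0001     -> AppController#job(),      $(JOB_ID) bound
// GET /tasks/job_1234567890_0001/m -> AppController#tasks(),    $(TASK_TYPE) = "m"
// GET /attempts/job_1234567890_0001/m/running
//                                  -> AppController#attempts(), $(ATTEMPT_STATE) = "running"
// GET /task/task_1234567890_0001_m_000000
//                                  -> AppController#task(),     $(TASK_ID) bound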
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/App.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/App.java
new file mode 100644
index 0000000..bb28ce6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/App.java
@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import com.google.inject.Inject;
+import com.google.inject.servlet.RequestScoped;
+
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+
+@RequestScoped
+public class App {
+ final AppContext context;
+ Job job;
+ Task task;
+
+ @Inject
+ App(AppContext ctx) {
+ context = ctx;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
new file mode 100644
index 0000000..adc0e14
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -0,0 +1,169 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+import java.util.Locale;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.Apps;
+import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.webapp.Controller;
+
+import com.google.inject.Inject;
+
+public class AppController extends Controller implements AMParams {
+ final App app;
+
+ protected AppController(App app, Configuration conf, RequestContext ctx,
+ String title) {
+ super(ctx);
+ this.app = app;
+ set(APP_ID, Apps.toString(app.context.getApplicationID()));
+ set(RM_WEB, YarnConfiguration.getRMWebAppURL(conf));
+ }
+
+ @Inject
+ protected AppController(App app, Configuration conf, RequestContext ctx) {
+ this(app, conf, ctx, "am");
+ }
+
+ @Override public void index() {
+ setTitle(join("MapReduce Application ", $(APP_ID)));
+ }
+
+ public void info() {
+ info("Application Master Overview").
+ _("Application ID:", $(APP_ID)).
+ _("Application Name:", app.context.getApplicationName()).
+ _("User:", app.context.getUser()).
+ _("Started on:", Times.format(app.context.getStartTime())).
+ _("Elasped: ", org.apache.hadoop.util.StringUtils.formatTime(
+ Times.elapsed(app.context.getStartTime(), 0)));
+ render(InfoPage.class);
+ }
+
+ public void job() {
+ requireJob();
+ render(JobPage.class);
+ }
+
+ public void jobCounters() {
+ requireJob();
+ if (app.job != null) {
+ setTitle(join("Counters for ", $(JOB_ID)));
+ }
+ render(CountersPage.class);
+ }
+
+ public void tasks() {
+ requireJob();
+ if (app.job != null) {
+ try {
+ String tt = $(TASK_TYPE);
+ tt = tt.isEmpty() ? "All" : StringUtils.capitalize(MRApps.taskType(tt).
+ toString().toLowerCase(Locale.US));
+ setTitle(join(tt, " Tasks for ", $(JOB_ID)));
+ } catch (Exception e) {
+ badRequest(e.getMessage());
+ }
+ }
+ render(TasksPage.class);
+ }
+
+ public void task() {
+ requireTask();
+ if (app.task != null) {
+ setTitle(join("Attempts for ", $(TASK_ID)));
+ }
+ render(TaskPage.class);
+ }
+
+ public void attempts() {
+ requireJob();
+ if (app.job != null) {
+ try {
+ String taskType = $(TASK_TYPE);
+ if (taskType.isEmpty()) {
+ throw new RuntimeException("missing task-type.");
+ }
+ String attemptState = $(ATTEMPT_STATE);
+ if (attemptState.isEmpty()) {
+ throw new RuntimeException("missing attempt-state.");
+ }
+ setTitle(join(attemptState, " ",
+ MRApps.taskType(taskType).toString(), " attempts in ", $(JOB_ID)));
+ } catch (Exception e) {
+ badRequest(e.getMessage());
+ }
+ }
+ render(AttemptsPage.class);
+ }
+
+ void badRequest(String s) {
+ setStatus(response().SC_BAD_REQUEST);
+ setTitle(join("Bad request: ", s));
+ }
+
+ void notFound(String s) {
+ setStatus(response().SC_NOT_FOUND);
+ setTitle(join("Not found: ", s));
+ }
+
+ void requireJob() {
+ try {
+ if ($(JOB_ID).isEmpty()) {
+ throw new RuntimeException("missing job ID");
+ }
+ JobId jobID = MRApps.toJobID($(JOB_ID));
+ app.job = app.context.getJob(jobID);
+ if (app.job == null) {
+ notFound($(JOB_ID));
+ }
+ } catch (Exception e) {
+ badRequest(e.getMessage() == null ? e.getClass().getName() : e.getMessage());
+ }
+ }
+
+ void requireTask() {
+ try {
+ if ($(TASK_ID).isEmpty()) {
+ throw new RuntimeException("missing task ID");
+ }
+ TaskId taskID = MRApps.toTaskID($(TASK_ID));
+ app.job = app.context.getJob(taskID.getJobId());
+ if (app.job == null) {
+ notFound(MRApps.toString(taskID.getJobId()));
+ } else {
+ app.task = app.job.getTask(taskID);
+ if (app.task == null) {
+ notFound($(TASK_ID));
+ }
+ }
+ } catch (Exception e) {
+ badRequest(e.getMessage());
+ }
+ }
+}
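A hedged sketch of how a further action would hang off this controller, mirroring job() and task(): validate with requireJob(), optionally set a title, then render a view. The action name is hypothetical, it reuses JobPage as a stand-in view, and it would also need a matching route in AMWebApp:

// Hypothetical addition inside AppController:
public void jobConf() {
  requireJob();                  // sets a 4xx status and title on bad input
  if (app.job != null) {
    setTitle(join("Configuration for ", $(JOB_ID)));
  }
  render(JobPage.class);         // a real JobConfPage would go here
}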
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java
new file mode 100644
index 0000000..613bda5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java
@@ -0,0 +1,59 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class AppView extends TwoColumnLayout {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ set(DATATABLES_ID, "jobs");
+ set(initID(DATATABLES, "jobs"), jobsTableInit());
+ setTableStyles(html, "jobs");
+ }
+
+ protected void commonPreHead(Page.HTML<_> html) {
+ html.meta_http("refresh", "10");
+ set(ACCORDION_ID, "nav");
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
+ set(THEMESWITCHER_ID, "themeswitcher");
+ }
+
+ @Override
+ protected Class<? extends SubView> nav() {
+ return NavBlock.class;
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return JobsBlock.class;
+ }
+
+ private String jobsTableInit() {
+ return tableInit().
+ append(",aoColumns:[{sType:'title-numeric'},").
+ append("null,null,{sType:'title-numeric', bSearchable:false},null,").
+ append("null,{sType:'title-numeric',bSearchable:false}, null, null]}").
+ toString();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AttemptsPage.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AttemptsPage.java
new file mode 100644
index 0000000..d0cd23e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AttemptsPage.java
@@ -0,0 +1,76 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.ATTEMPT_STATE;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
+import org.apache.hadoop.yarn.webapp.SubView;
+
+import com.google.inject.Inject;
+
+public class AttemptsPage extends TaskPage {
+ static class FewAttemptsBlock extends TaskPage.AttemptsBlock {
+ @Inject
+ FewAttemptsBlock(App ctx) {
+ super(ctx);
+ }
+
+ @Override
+ protected boolean isValidRequest() {
+ return true;
+ }
+
+ @Override
+ protected Collection<TaskAttempt> getTaskAttempts() {
+ List<TaskAttempt> fewTaskAttempts = new ArrayList<TaskAttempt>();
+ String taskTypeStr = $(TASK_TYPE);
+ TaskType taskType = MRApps.taskType(taskTypeStr);
+ String attemptStateStr = $(ATTEMPT_STATE);
+ TaskAttemptStateUI neededState = MRApps
+ .taskAttemptState(attemptStateStr);
+ for (Task task : super.app.job.getTasks(taskType).values()) {
+ Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
+ for (TaskAttempt attempt : attempts.values()) {
+ if (neededState.correspondsTo(attempt.getState())) {
+ fewTaskAttempts.add(attempt);
+ }
+ }
+ }
+ return fewTaskAttempts;
+ }
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return FewAttemptsBlock.class;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
new file mode 100644
index 0000000..bd95599
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
@@ -0,0 +1,161 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import com.google.inject.Inject;
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class CountersBlock extends HtmlBlock {
+ Job job;
+ Task task;
+ Counters total;
+ Counters map;
+ Counters reduce;
+
+ @Inject CountersBlock(AppContext appCtx, ViewContext ctx) {
+ super(ctx);
+ getCounters(appCtx);
+ }
+
+ @Override protected void render(Block html) {
+ if (job == null) {
+ html.
+ p()._("Sorry, no counters for nonexistent", $(JOB_ID, "job"))._();
+ return;
+ }
+ if (!$(TASK_ID).isEmpty() && task == null) {
+ html.
+ p()._("Sorry, no counters for nonexistent", $(TASK_ID, "task"))._();
+ return;
+ }
+ int numGroups = 0;
+ TBODY<TABLE<DIV<Hamlet>>> tbody = html.
+ div(_INFO_WRAP).
+ table("#counters").
+ thead().
+ tr().
+ th(".group.ui-state-default", "Counter Group").
+ th(".ui-state-default", "Counters")._()._().
+ tbody();
+ for (CounterGroup g : total.getAllCounterGroups().values()) {
+ CounterGroup mg = map == null ? null : map.getCounterGroup(g.getName());
+ CounterGroup rg = reduce == null ? null : reduce.getCounterGroup(g.getName());
+ ++numGroups;
+ // This is mostly for demonstration :) Typically we'd introduce
+ // a CounterGroup block to reduce the verbosity. OTOH, this
+ // serves as an indicator of where we are in the tag hierarchy.
+ TR<THEAD<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupHeadRow = tbody.
+ tr().
+ th().$title(g.getName()).
+ _(fixGroupDisplayName(g.getDisplayName()))._().
+ td().$class(C_TABLE).
+ table(".dt-counters").
+ thead().
+ tr().th(".name", "Name");
+ if (map != null) {
+ groupHeadRow.th("Map").th("Reduce");
+ }
+ // Ditto
+ TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>> group = groupHeadRow.
+ th(map == null ? "Value" : "Total")._()._().
+ tbody();
+ for (Counter counter : g.getAllCounters().values()) {
+ // Ditto
+ TR<TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupRow = group.
+ tr().
+ td().$title(counter.getName()).
+ _(counter.getDisplayName())._();
+ if (map != null) {
+ Counter mc = mg == null ? null : mg.getCounter(counter.getName());
+ Counter rc = rg == null ? null : rg.getCounter(counter.getName());
+ groupRow.
+ td(mc == null ? "0" : String.valueOf(mc.getValue())).
+ td(rc == null ? "0" : String.valueOf(rc.getValue()));
+ }
+ groupRow.td(String.valueOf(counter.getValue()))._();
+ }
+ group._()._()._()._();
+ }
+ tbody._()._()._();
+ }
+
+ private void getCounters(AppContext ctx) {
+ JobId jobID = null;
+ TaskId taskID = null;
+ String tid = $(TASK_ID);
+ if (!tid.isEmpty()) {
+ taskID = MRApps.toTaskID(tid);
+ jobID = taskID.getJobId();
+ } else {
+ String jid = $(JOB_ID);
+ if (!jid.isEmpty()) {
+ jobID = MRApps.toJobID(jid);
+ }
+ }
+ if (jobID == null) {
+ return;
+ }
+ job = ctx.getJob(jobID);
+ if (job == null) {
+ return;
+ }
+ if (taskID != null) {
+ task = job.getTask(taskID);
+ if (task == null) {
+ return;
+ }
+ total = task.getCounters();
+ return;
+ }
+ // Get all types of counters
+ Map<TaskId, Task> tasks = job.getTasks();
+ total = JobImpl.newCounters();
+ map = JobImpl.newCounters();
+ reduce = JobImpl.newCounters();
+ for (Task t : tasks.values()) {
+ Counters counters = t.getCounters();
+ JobImpl.incrAllCounters(total, counters);
+ switch (t.getType()) {
+ case MAP: JobImpl.incrAllCounters(map, counters); break;
+ case REDUCE: JobImpl.incrAllCounters(reduce, counters); break;
+ }
+ }
+ }
+
+ private String fixGroupDisplayName(CharSequence name) {
+ return name.toString().replace(".", ".\u200B").replace("$", "\u200B$");
+ }
+}
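The "mostly for demonstration" comment above hints at factoring each group into its own block; a hedged sketch of that refactoring (the class name, constructor wiring, and flat two-column layout are hypothetical):

package org.apache.hadoop.mapreduce.v2.app.webapp;

import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

public class CounterGroupBlock extends HtmlBlock {
  private final CounterGroup group; // would be handed in by the parent block

  CounterGroupBlock(CounterGroup group) {
    this.group = group;
  }

  @Override protected void render(Block html) {
    // One small table per group keeps the Hamlet tag generics shallow.
    TBODY<TABLE<Hamlet>> tbody = html.
      table(".dt-counters").
        thead().
          tr().th(".name", "Name").th("Value")._()._().
        tbody();
    for (Counter counter : group.getAllCounters().values()) {
      tbody.
        tr().
          td(counter.getDisplayName()).
          td(String.valueOf(counter.getValue()))._();
    }
    tbody._()._();
  }
}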
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
new file mode 100644
index 0000000..9bd5ed1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
@@ -0,0 +1,47 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class CountersPage extends AppView {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
+ set(DATATABLES_SELECTOR, "#counters .dt-counters");
+ set(initSelector(DATATABLES),
+ "{bJQueryUI:true, sDom:'t', iDisplayLength:-1}");
+ }
+
+ @Override protected void postHead(Page.HTML<_> html) {
+ html.
+ style("#counters, .dt-counters { table-layout: fixed }",
+ "#counters th { overflow: hidden; vertical-align: center }",
+ "#counters .dataTables_wrapper { min-height: 1em }",
+ "#counters .group { width: 10em }",
+ "#counters .name { width: 30em }");
+ }
+
+ @Override protected Class<? extends SubView> content() {
+ return CountersBlock.class;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java
new file mode 100644
index 0000000..5163a01
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java
@@ -0,0 +1,34 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.InfoBlock;
+
+public class InfoPage extends AppView {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ setTitle("About the Application Master");
+ }
+
+ @Override protected Class<? extends SubView> content() {
+ return InfoBlock.class;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
new file mode 100644
index 0000000..4969a76
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
@@ -0,0 +1,254 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import com.google.inject.Inject;
+import java.util.Date;
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.webapp.view.InfoBlock;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class JobBlock extends HtmlBlock {
+ final AppContext appContext;
+
+ int runningMapTasks = 0;
+ int pendingMapTasks = 0;
+ int runningReduceTasks = 0;
+ int pendingReduceTasks = 0;
+
+ int newMapAttempts = 0;
+ int runningMapAttempts = 0;
+ int killedMapAttempts = 0;
+ int failedMapAttempts = 0;
+ int successfulMapAttempts = 0;
+ int newReduceAttempts = 0;
+ int runningReduceAttempts = 0;
+ int killedReduceAttempts = 0;
+ int failedReduceAttempts = 0;
+ int successfulReduceAttempts = 0;
+
+ @Inject JobBlock(AppContext appctx) {
+ appContext = appctx;
+ }
+
+ @Override protected void render(Block html) {
+ String jid = $(JOB_ID);
+ if (jid.isEmpty()) {
+ html.
+ p()._("Sorry, can't do anything without a JobID.")._();
+ return;
+ }
+ JobId jobID = MRApps.toJobID(jid);
+ Job job = appContext.getJob(jobID);
+ if (job == null) {
+ html.
+ p()._("Sorry, ", jid, " not found.")._();
+ return;
+ }
+ JobReport jobReport = job.getReport();
+ String mapPct = percent(jobReport.getMapProgress());
+ String reducePct = percent(jobReport.getReduceProgress());
+ int mapTasks = job.getTotalMaps();
+ int mapTasksComplete = job.getCompletedMaps();
+ int reduceTasks = job.getTotalReduces();
+ int reduceTasksComplete = job.getCompletedReduces();
+ long startTime = jobReport.getStartTime();
+ long finishTime = jobReport.getFinishTime();
+ countTasksAndAttempts(job);
+ info("Job Overview").
+ _("Job Name:", job.getName()).
+ _("State:", job.getState()).
+ _("Uberized:", job.isUber()).
+ _("Started:", new Date(startTime)).
+ _("Elapsed:", StringUtils.formatTime(
+ Times.elapsed(startTime, finishTime)));
+ html.
+ _(InfoBlock.class).
+ div(_INFO_WRAP).
+
+ // Tasks table
+ table("#job").
+ tr().
+ th(_TH, "Task Type").
+ th(_TH, "Progress").
+ th(_TH, "Total").
+ th(_TH, "Pending").
+ th(_TH, "Running").
+ th(_TH, "Complete")._().
+ tr(_ODD).
+ th().
+ a(url("tasks", jid, "m"), "Map")._().
+ td().
+ div(_PROGRESSBAR).
+ $title(join(mapPct, '%')). // tooltip
+ div(_PROGRESSBAR_VALUE).
+ $style(join("width:", mapPct, '%'))._()._()._().
+ td(String.valueOf(mapTasks)).
+ td(String.valueOf(pendingMapTasks)).
+ td(String.valueOf(runningMapTasks)).
+ td(String.valueOf(mapTasksComplete))._().
+ tr(_EVEN).
+ th().
+ a(url("tasks", jid, "r"), "Reduce")._().
+ td().
+ div(_PROGRESSBAR).
+ $title(join(reducePct, '%')). // tooltip
+ div(_PROGRESSBAR_VALUE).
+ $style(join("width:", reducePct, '%'))._()._()._().
+ td(String.valueOf(reduceTasks)).
+ td(String.valueOf(pendingReduceTasks)).
+ td(String.valueOf(runningReduceTasks)).
+ td(String.valueOf(reduceTasksComplete))._()
+ ._().
+
+ // Attempts table
+ table("#job").
+ tr().
+ th(_TH, "Attempt Type").
+ th(_TH, "New").
+ th(_TH, "Running").
+ th(_TH, "Failed").
+ th(_TH, "Killed").
+ th(_TH, "Successful")._().
+ tr(_ODD).
+ th("Maps").
+ td().a(url("attempts", jid, "m",
+ TaskAttemptStateUI.NEW.toString()),
+ String.valueOf(newMapAttempts))._().
+ td().a(url("attempts", jid, "m",
+ TaskAttemptStateUI.RUNNING.toString()),
+ String.valueOf(runningMapAttempts))._().
+ td().a(url("attempts", jid, "m",
+ TaskAttemptStateUI.FAILED.toString()),
+ String.valueOf(failedMapAttempts))._().
+ td().a(url("attempts", jid, "m",
+ TaskAttemptStateUI.KILLED.toString()),
+ String.valueOf(killedMapAttempts))._().
+ td().a(url("attempts", jid, "m",
+ TaskAttemptStateUI.SUCCESSFUL.toString()),
+ String.valueOf(successfulMapAttempts))._().
+ _().
+ tr(_EVEN).
+ th("Reduces").
+ td().a(url("attempts", jid, "r",
+ TaskAttemptStateUI.NEW.toString()),
+ String.valueOf(newReduceAttempts))._().
+ td().a(url("attempts", jid, "r",
+ TaskAttemptStateUI.RUNNING.toString()),
+ String.valueOf(runningReduceAttempts))._().
+ td().a(url("attempts", jid, "r",
+ TaskAttemptStateUI.FAILED.toString()),
+ String.valueOf(failedReduceAttempts))._().
+ td().a(url("attempts", jid, "r",
+ TaskAttemptStateUI.KILLED.toString()),
+ String.valueOf(killedReduceAttempts))._().
+ td().a(url("attempts", jid, "r",
+ TaskAttemptStateUI.SUCCESSFUL.toString()),
+ String.valueOf(successfulReduceAttempts))._().
+ _().
+ _().
+ _();
+ }
+
+ private void countTasksAndAttempts(Job job) {
+ Map<TaskId, Task> tasks = job.getTasks();
+ for (Task task : tasks.values()) {
+ switch (task.getType()) {
+ case MAP:
+ // Task counts
+ switch (task.getState()) {
+ case RUNNING:
+ ++runningMapTasks;
+ break;
+ case SCHEDULED:
+ ++pendingMapTasks;
+ break;
+ }
+ break;
+ case REDUCE:
+ // Task counts
+ switch (task.getState()) {
+ case RUNNING:
+ ++runningReduceTasks;
+ break;
+ case SCHEDULED:
+ ++pendingReduceTasks;
+ break;
+ }
+ break;
+ }
+
+ // Attempts counts
+ Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
+ for (TaskAttempt attempt : attempts.values()) {
+
+ int newAttempts = 0, running = 0, successful = 0, failed = 0, killed = 0;
+
+ if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
+ ++newAttempts;
+ } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt
+ .getState())) {
+ ++running;
+ } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt
+ .getState())) {
+ ++successful;
+ } else if (TaskAttemptStateUI.FAILED
+ .correspondsTo(attempt.getState())) {
+ ++failed;
+ } else if (TaskAttemptStateUI.KILLED
+ .correspondsTo(attempt.getState())) {
+ ++killed;
+ }
+
+ switch (task.getType()) {
+ case MAP:
+ newMapAttempts += newAttempts;
+ runningMapAttempts += running;
+ successfulMapAttempts += successful;
+ failedMapAttempts += failed;
+ killedMapAttempts += killed;
+ break;
+ case REDUCE:
+ newReduceAttempts += newAttempts;
+ runningReduceAttempts += running;
+ successfulReduceAttempts += successful;
+ failedReduceAttempts += failed;
+ killedReduceAttempts += killed;
+ break;
+ }
+ }
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
new file mode 100644
index 0000000..62b506e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
@@ -0,0 +1,41 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+
+public class JobPage extends AppView {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ String jobID = $(JOB_ID);
+ set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
+ : join("MapReduce Job ", $(JOB_ID)));
+ commonPreHead(html);
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
+ }
+
+ @Override protected Class<? extends SubView> content() {
+ return JobBlock.class;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java
new file mode 100644
index 0000000..5e56295
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java
@@ -0,0 +1,92 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import com.google.inject.Inject;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class JobsBlock extends HtmlBlock {
+ final AppContext appContext;
+
+ @Inject JobsBlock(AppContext appCtx) {
+ appContext = appCtx;
+ }
+
+ @Override protected void render(Block html) {
+ TBODY<TABLE<Hamlet>> tbody = html.
+ h2("Active Jobs").
+ table("#jobs").
+ thead().
+ tr().
+ th(".id", "Job ID").
+ th(".name", "Name").
+ th(".state", "State").
+ th("Map Progress").
+ th("Maps Total").
+ th("Maps Completed").
+ th("Reduce Progress").
+ th("Reduces Total").
+ th("Reduces Completed")._()._().
+ tbody();
+ for (Job job : appContext.getAllJobs().values()) {
+ String jobID = MRApps.toString(job.getID());
+ JobReport report = job.getReport();
+ String mapPct = percent(report.getMapProgress());
+ String mapsTotal = String.valueOf(job.getTotalMaps());
+ String mapsCompleted = String.valueOf(job.getCompletedMaps());
+ String reducePct = percent(report.getReduceProgress());
+ String reduceTotal = String.valueOf(job.getTotalReduces());
+ String reduceCompleted = String.valueOf(job.getCompletedReduces());
+ tbody.
+ tr().
+ td().
+ span().$title(String.valueOf(job.getID().getId()))._(). // for sorting
+ a(url("job", jobID), jobID)._().
+ td(job.getName().toString()).
+ td(job.getState().toString()).
+ td().
+ span().$title(mapPct)._(). // for sorting
+ div(_PROGRESSBAR).
+ $title(join(mapPct, '%')). // tooltip
+ div(_PROGRESSBAR_VALUE).
+ $style(join("width:", mapPct, '%'))._()._()._().
+ td(mapsTotal).
+ td(mapsCompleted).
+ td().
+ span().$title(reducePct)._(). // for sorting
+ div(_PROGRESSBAR).
+ $title(join(reducePct, '%')). // tooltip
+ div(_PROGRESSBAR_VALUE).
+ $style(join("width:", reducePct, '%'))._()._()._().
+ td(reduceTotal).
+ td(reduceCompleted)._();
+ }
+ tbody._()._();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
new file mode 100644
index 0000000..c7d6751
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
@@ -0,0 +1,67 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import com.google.inject.Inject;
+
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
+
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+public class NavBlock extends HtmlBlock {
+ final App app;
+
+ @Inject NavBlock(App app) { this.app = app; }
+
+ @Override protected void render(Block html) {
+ String rmweb = $(RM_WEB);
+ DIV<Hamlet> nav = html.
+ div("#nav").
+ h3("Cluster").
+ ul().
+ li().a(url(rmweb, prefix(), "cluster"), "About")._().
+ li().a(url(rmweb, prefix(), "apps"), "Applications")._().
+ li().a(url(rmweb, prefix(), "scheduler"), "Scheduler")._()._().
+ h3("Application").
+ ul().
+ li().a(url("app/info"), "About")._().
+ li().a(url("app"), "Jobs")._()._();
+ if (app.job != null) {
+ String jobid = MRApps.toString(app.job.getID());
+ nav.
+ h3("Job").
+ ul().
+ li().a(url("job", jobid), "Overview")._().
+ li().a(url("jobcounters", jobid), "Counters")._().
+ li().a(url("tasks", jobid, "m"), "Map tasks")._().
+ li().a(url("tasks", jobid, "r"), "Reduce tasks")._()._();
+ }
+ nav.
+ h3("Tools").
+ ul().
+ li().a("/conf", "Configuration")._().
+ li().a("/logs", "Local logs")._().
+ li().a("/stacks", "Server stacks")._().
+ li().a("/metrics", "Server metrics")._()._()._().
+ div("#themeswitcher")._();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
new file mode 100644
index 0000000..3671df0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
@@ -0,0 +1,124 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import java.util.Collection;
+
+import com.google.common.base.Joiner;
+import com.google.inject.Inject;
+
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class TaskPage extends AppView {
+
+ static class AttemptsBlock extends HtmlBlock {
+ final App app;
+
+ @Inject
+ AttemptsBlock(App ctx) {
+ app = ctx;
+ }
+
+ @Override
+ protected void render(Block html) {
+ if (!isValidRequest()) {
+ html.
+ h2($(TITLE));
+ return;
+ }
+ TBODY<TABLE<Hamlet>> tbody = html.
+ table("#attempts").
+ thead().
+ tr().
+ th(".id", "Attempt").
+ th(".progress", "Progress").
+ th(".state", "State").
+ th(".node", "node").
+ th(".tsh", "Started").
+ th(".tsh", "Finished").
+ th(".tsh", "Elapsed").
+ th(".note", "Note")._()._().
+ tbody();
+ for (TaskAttempt ta : getTaskAttempts()) {
+ String taid = MRApps.toString(ta.getID());
+ String progress = percent(ta.getProgress());
+ ContainerId containerId = ta.getAssignedContainerID();
+
+ String nodeHttpAddr = ta.getNodeHttpAddress();
+ long startTime = ta.getLaunchTime();
+ long finishTime = ta.getFinishTime();
+ long elapsed = Times.elapsed(startTime, finishTime);
+ TD<TR<TBODY<TABLE<Hamlet>>>> nodeTd = tbody.
+ tr().
+ td(".id", taid).
+ td(".progress", progress).
+ td(".state", ta.getState().toString()).
+ td().
+ a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr);
+ if (containerId != null) {
+ String containerIdStr = ConverterUtils.toString(containerId);
+ nodeTd._(" ").
+ a(".logslink", url("http://", nodeHttpAddr, "yarn", "containerlogs",
+ containerIdStr), "logs");
+ }
+ nodeTd._().
+ td(".ts", Times.format(startTime)).
+ td(".ts", Times.format(finishTime)).
+ td(".dt", StringUtils.formatTime(elapsed)).
+ td(".note", Joiner.on('\n').join(ta.getDiagnostics()))._();
+ }
+ tbody._()._();
+ }
+
+ protected boolean isValidRequest() {
+ return app.task != null;
+ }
+
+ protected Collection<TaskAttempt> getTaskAttempts() {
+ return app.task.getAttempts().values();
+ }
+ }
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
+ set(DATATABLES_ID, "attempts");
+ set(initID(DATATABLES, "attempts"), attemptsTableInit());
+ setTableStyles(html, "attempts");
+ }
+
+ @Override protected Class<? extends SubView> content() {
+ return AttemptsBlock.class;
+ }
+
+ private String attemptsTableInit() {
+ return tableInit().append("}").toString();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
new file mode 100644
index 0000000..c44453b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
@@ -0,0 +1,100 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import com.google.inject.Inject;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class TasksBlock extends HtmlBlock {
+ final App app;
+
+ @Inject TasksBlock(App app) {
+ this.app = app;
+ }
+
+ @Override protected void render(Block html) {
+ if (app.job == null) {
+ html.
+ h2($(TITLE));
+ return;
+ }
+ TaskType type = null;
+ String symbol = $(TASK_TYPE);
+ if (!symbol.isEmpty()) {
+ type = MRApps.taskType(symbol);
+ }
+ TBODY<TABLE<Hamlet>> tbody = html.
+ table("#tasks").
+ thead().
+ tr().
+ th("Task").
+ th("Progress").
+ th("State").
+ th("Start Time").
+ th("Finish Time").
+ th("Elapsed Time")._()._().
+ tbody();
+ for (Task task : app.job.getTasks().values()) {
+ if (type != null && task.getType() != type) {
+ continue;
+ }
+ String tid = MRApps.toString(task.getID());
+ TaskReport report = task.getReport();
+ String pct = percent(report.getProgress());
+ long startTime = report.getStartTime();
+ long finishTime = report.getFinishTime();
+ long elapsed = Times.elapsed(startTime, finishTime);
+ tbody.
+ tr().
+ td().
+ br().$title(String.valueOf(task.getID().getId()))._(). // sorting
+ a(url("task", tid), tid)._().
+ td().
+ br().$title(pct)._().
+ div(_PROGRESSBAR).
+ $title(join(pct, '%')). // tooltip
+ div(_PROGRESSBAR_VALUE).
+ $style(join("width:", pct, '%'))._()._()._().
+ td(report.getTaskState().toString()).
+ td().
+ br().$title(String.valueOf(startTime))._().
+ _(Times.format(startTime))._().
+ td().
+ br().$title(String.valueOf(finishTime))._().
+ _(Times.format(finishTime))._().
+ td().
+ br().$title(String.valueOf(elapsed))._().
+ _(StringUtils.formatTime(elapsed))._()._();
+ }
+ tbody._()._();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
new file mode 100644
index 0000000..607f92d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
@@ -0,0 +1,45 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class TasksPage extends AppView {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ set(DATATABLES_ID, "tasks");
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
+ set(initID(DATATABLES, "tasks"), tasksTableInit());
+ setTableStyles(html, "tasks");
+ }
+
+ @Override protected Class<? extends SubView> content() {
+ return TasksBlock.class;
+ }
+
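+  // DataTables init for the tasks table: the title-numeric sort type reads
+  // the hidden <br title="..."> values emitted by TasksBlock for each column.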
+ private String tasksTableInit() {
+ return tableInit().
+ append(",aoColumns:[{sType:'title-numeric'},{sType:'title-numeric',").
+ append("bSearchable:false},null,{sType:'title-numeric'},").
+ append("{sType:'title-numeric'},{sType:'title-numeric'}]}").toString();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
new file mode 100644
index 0000000..35c4af0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
@@ -0,0 +1 @@
+org.apache.hadoop.mapreduce.v2.app.MRClientSecurityInfo
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
new file mode 100644
index 0000000..57f95dc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
@@ -0,0 +1,423 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.EnumSet;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.WrappedJvmID;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleaner;
+import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.service.Service;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+
+
+/**
+ * Mock MRAppMaster. Doesn't start RPC servers.
+ * No threads are started except for the event Dispatcher thread.
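+ *
+ * Typical usage in a test (a sketch; names follow this class's own API):
+ *   MRApp app = new MRApp(2, 1, true, "TestName", true);
+ *   Job job = app.submit(new Configuration());
+ *   app.waitForState(job, JobState.SUCCEEDED);
+ *   app.verifyCompleted();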
+ */
+public class MRApp extends MRAppMaster {
+ private static final Log LOG = LogFactory.getLog(MRApp.class);
+
+ int maps;
+ int reduces;
+
+ private File testWorkDir;
+ private Path testAbsPath;
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ //if true, tasks complete automatically as soon as they are launched
+ protected boolean autoComplete = false;
+
+ static ApplicationId applicationId;
+
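+  // A fixed, all-zero ApplicationId shared by all MRApp instances keeps test ids deterministic.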
+ static {
+ applicationId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
+ applicationId.setClusterTimestamp(0);
+ applicationId.setId(0);
+ }
+
+ public MRApp(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart) {
+ this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
+ }
+
+ public MRApp(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart, int startCount) {
+ super(applicationId, startCount);
+ this.testWorkDir = new File("target", testName);
+ testAbsPath = new Path(testWorkDir.getAbsolutePath());
+ LOG.info("PathUsed: " + testAbsPath);
+    if (cleanOnStart) {
+ try {
+ FileContext.getLocalFSFileContext().delete(testAbsPath, true);
+ } catch (Exception e) {
+ LOG.warn("COULD NOT CLEANUP: " + testAbsPath, e);
+ throw new YarnException("could not cleanup test dir", e);
+ }
+ }
+
+ this.maps = maps;
+ this.reduces = reduces;
+ this.autoComplete = autoComplete;
+ }
+
+ public Job submit(Configuration conf) throws Exception {
+ String user = conf.get(MRJobConfig.USER_NAME, "mapred");
+ conf.set(MRJobConfig.USER_NAME, user);
+ conf.set(MRConstants.APPS_STAGING_DIR_KEY, testAbsPath.toString());
+ conf.setBoolean(JHConfig.CREATE_HISTORY_INTERMEDIATE_BASE_DIR_KEY, true);
+ //TODO: fix the bug where the speculator gets events with
+ //not-fully-constructed objects. For now, disable speculative exec
+ LOG.info("****DISABLING SPECULATIVE EXECUTION*****");
+ conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
+ conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
+
+ init(conf);
+ start();
+ DefaultMetricsSystem.shutdown();
+ Job job = getContext().getAllJobs().values().iterator().next();
+ return job;
+ }
+
+ public void waitForState(TaskAttempt attempt,
+ TaskAttemptState finalState) throws Exception {
+ int timeoutSecs = 0;
+ TaskAttemptReport report = attempt.getReport();
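+    // poll every 500 ms, up to 20 iterations (~10 seconds), before asserting the final state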
+ while (!finalState.equals(report.getTaskAttemptState()) &&
+ timeoutSecs++ < 20) {
+ System.out.println("TaskAttempt State is : " + report.getTaskAttemptState() +
+ " Waiting for state : " + finalState +
+ " progress : " + report.getProgress());
+ report = attempt.getReport();
+ Thread.sleep(500);
+ }
+ System.out.println("TaskAttempt State is : " + report.getTaskAttemptState());
+ Assert.assertEquals("TaskAttempt state is not correct (timedout)",
+ finalState,
+ report.getTaskAttemptState());
+ }
+
+ public void waitForState(Task task, TaskState finalState) throws Exception {
+ int timeoutSecs = 0;
+ TaskReport report = task.getReport();
+ while (!finalState.equals(report.getTaskState()) &&
+ timeoutSecs++ < 20) {
+ System.out.println("Task State is : " + report.getTaskState() +
+ " Waiting for state : " + finalState +
+ " progress : " + report.getProgress());
+ report = task.getReport();
+ Thread.sleep(500);
+ }
+ System.out.println("Task State is : " + report.getTaskState());
+ Assert.assertEquals("Task state is not correct (timedout)", finalState,
+ report.getTaskState());
+ }
+
+ public void waitForState(Job job, JobState finalState) throws Exception {
+ int timeoutSecs = 0;
+ JobReport report = job.getReport();
+ while (!finalState.equals(report.getJobState()) &&
+ timeoutSecs++ < 20) {
+ System.out.println("Job State is : " + report.getJobState() +
+ " Waiting for state : " + finalState +
+ " map progress : " + report.getMapProgress() +
+ " reduce progress : " + report.getReduceProgress());
+ report = job.getReport();
+ Thread.sleep(500);
+ }
+ System.out.println("Job State is : " + report.getJobState());
+ Assert.assertEquals("Job state is not correct (timedout)", finalState,
+ job.getState());
+ }
+
+ public void waitForState(Service.STATE finalState) throws Exception {
+ int timeoutSecs = 0;
+ while (!finalState.equals(getServiceState()) && timeoutSecs++ < 20) {
+ System.out.println("MRApp State is : " + getServiceState()
+ + " Waiting for state : " + finalState);
+ Thread.sleep(500);
+ }
+ System.out.println("MRApp State is : " + getServiceState());
+ Assert.assertEquals("MRApp state is not correct (timedout)", finalState,
+ getServiceState());
+ }
+
+ public void verifyCompleted() {
+ for (Job job : getContext().getAllJobs().values()) {
+ JobReport jobReport = job.getReport();
+ System.out.println("Job start time :" + jobReport.getStartTime());
+ System.out.println("Job finish time :" + jobReport.getFinishTime());
+ Assert.assertTrue("Job start time is not less than finish time",
+ jobReport.getStartTime() <= jobReport.getFinishTime());
+ Assert.assertTrue("Job finish time is in future",
+ jobReport.getFinishTime() <= System.currentTimeMillis());
+ for (Task task : job.getTasks().values()) {
+ TaskReport taskReport = task.getReport();
+ System.out.println("Task start time : " + taskReport.getStartTime());
+ System.out.println("Task finish time : " + taskReport.getFinishTime());
+ Assert.assertTrue("Task start time is not less than finish time",
+ taskReport.getStartTime() <= taskReport.getFinishTime());
+ for (TaskAttempt attempt : task.getAttempts().values()) {
+ TaskAttemptReport attemptReport = attempt.getReport();
+ Assert.assertTrue("Attempt start time is not less than finish time",
+ attemptReport.getStartTime() <= attemptReport.getFinishTime());
+ }
+ }
+ }
+ }
+
+ @Override
+ protected Job createJob(Configuration conf, Credentials fsTokens) {
+ Job newJob = new TestJob(getAppID(), getDispatcher().getEventHandler(),
+ getTaskAttemptListener(), getContext().getClock());
+ ((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob);
+
+ getDispatcher().register(JobFinishEvent.Type.class,
+ new EventHandler<JobFinishEvent>() {
+ @Override
+ public void handle(JobFinishEvent event) {
+ stop();
+ }
+ });
+
+ return newJob;
+ }
+
+ @Override
+ protected TaskAttemptListener createTaskAttemptListener(AppContext context) {
+ return new TaskAttemptListener(){
+ @Override
+ public InetSocketAddress getAddress() {
+ return null;
+ }
+ @Override
+ public void register(TaskAttemptId attemptID,
+ org.apache.hadoop.mapred.Task task, WrappedJvmID jvmID) {}
+ @Override
+ public void unregister(TaskAttemptId attemptID, WrappedJvmID jvmID) {
+ }
+ };
+ }
+
+ @Override
+ protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
+ AppContext context) {//disable history
+ return new EventHandler<JobHistoryEvent>() {
+ @Override
+ public void handle(JobHistoryEvent event) {
+ }
+ };
+ }
+
+ @Override
+ protected ContainerLauncher createContainerLauncher(AppContext context,
+ boolean isLocal) {
+ return new MockContainerLauncher();
+ }
+
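+  // Bypasses real container launch: immediately reports the container as
+  // launched (or cleaned up) back to the application's event handler.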
+ class MockContainerLauncher implements ContainerLauncher {
+ @Override
+ public void handle(ContainerLauncherEvent event) {
+ switch (event.getType()) {
+ case CONTAINER_REMOTE_LAUNCH:
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(event.getTaskAttemptID(),
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED));
+
+ attemptLaunched(event.getTaskAttemptID());
+ break;
+ case CONTAINER_REMOTE_CLEANUP:
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(event.getTaskAttemptID(),
+ TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ break;
+ }
+ }
+ }
+
+ protected void attemptLaunched(TaskAttemptId attemptID) {
+ if (autoComplete) {
+ // send the done event
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_DONE));
+ }
+ }
+
+ @Override
+ protected ContainerAllocator createContainerAllocator(
+ ClientService clientService, AppContext context, boolean isLocal) {
+ return new ContainerAllocator(){
+ private int containerCount;
+ @Override
+ public void handle(ContainerAllocatorEvent event) {
+ ContainerId cId = recordFactory.newRecordInstance(ContainerId.class);
+ cId.setAppId(getContext().getApplicationID());
+ cId.setId(containerCount++);
+ Container container = recordFactory.newRecordInstance(Container.class);
+ container.setId(cId);
+ container.setNodeId(recordFactory.newRecordInstance(NodeId.class));
+ container.getNodeId().setHost("dummy");
+ container.getNodeId().setPort(1234);
+ container.setContainerToken(null);
+ container.setNodeHttpAddress("localhost:9999");
+ getContext().getEventHandler().handle(
+ new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
+ container));
+ }
+ };
+ }
+
+ @Override
+ protected TaskCleaner createTaskCleaner(AppContext context) {
+ return new TaskCleaner() {
+ @Override
+ public void handle(TaskCleanupEvent event) {
+ //send the cleanup done event
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(event.getAttemptID(),
+ TaskAttemptEventType.TA_CLEANUP_DONE));
+ }
+ };
+ }
+
+ @Override
+ protected ClientService createClientService(AppContext context) {
+ return new ClientService(){
+ @Override
+ public InetSocketAddress getBindAddress() {
+ return null;
+ }
+
+ @Override
+ public int getHttpPort() {
+ return -1;
+ }
+ };
+ }
+
+ class TestJob extends JobImpl {
+ //override the init transition
+ StateMachineFactory<JobImpl, JobState, JobEventType, JobEvent> localFactory
+ = stateMachineFactory.addTransition(JobState.NEW,
+ EnumSet.of(JobState.INITED, JobState.FAILED),
+ JobEventType.JOB_INIT,
+ // This is abusive.
+ new TestInitTransition(getConfig(), maps, reduces));
+
+ private final StateMachine<JobState, JobEventType, JobEvent>
+ localStateMachine;
+
+ @Override
+ protected StateMachine<JobState, JobEventType, JobEvent> getStateMachine() {
+ return localStateMachine;
+ }
+
+ public TestJob(ApplicationId appID, EventHandler eventHandler,
+ TaskAttemptListener taskAttemptListener, Clock clock) {
+ super(appID, new Configuration(), eventHandler, taskAttemptListener,
+ new JobTokenSecretManager(), new Credentials(), clock, getStartCount(),
+ getCompletedTaskFromPreviousRun(), metrics);
+
+ // This "this leak" is okay because the retained pointer is in an
+ // instance variable.
+ localStateMachine = localFactory.make(this);
+ }
+ }
+
+  //Override InitTransition so it does not look for split files, etc.
+ static class TestInitTransition extends JobImpl.InitTransition {
+ private Configuration config;
+ private int maps;
+ private int reduces;
+ TestInitTransition(Configuration config, int maps, int reduces) {
+ this.config = config;
+ this.maps = maps;
+ this.reduces = reduces;
+ }
+ @Override
+ protected void setup(JobImpl job) throws IOException {
+ job.conf = config;
+ job.conf.setInt(MRJobConfig.NUM_REDUCES, reduces);
+ job.remoteJobConfFile = new Path("test");
+ }
+ @Override
+ protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
+ TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[maps];
+ for (int i = 0; i < maps ; i++) {
+ splits[i] = new TaskSplitMetaInfo();
+ }
+ return splits;
+ }
+ }
+
+}
+
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
new file mode 100644
index 0000000..74bb1a8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -0,0 +1,192 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+
+public class MRAppBenchmark {
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ /**
+ * Runs memory and time benchmark with Mock MRApp.
+ */
+ public void run(MRApp app) throws Exception {
+ Logger rootLogger = LogManager.getRootLogger();
+ rootLogger.setLevel(Level.WARN);
+ long startTime = System.currentTimeMillis();
+ Job job = app.submit(new Configuration());
+ while (!job.getReport().getJobState().equals(JobState.SUCCEEDED)) {
+ printStat(job, startTime);
+ Thread.sleep(2000);
+ }
+ printStat(job, startTime);
+ }
+
+ private void printStat(Job job, long startTime) throws Exception {
+ long currentTime = System.currentTimeMillis();
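+    // Runtime.gc() is only a hint to the JVM; the memory figure below is an approximation.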
+ Runtime.getRuntime().gc();
+ long mem = Runtime.getRuntime().totalMemory()
+ - Runtime.getRuntime().freeMemory();
+ System.out.println("JobState:" + job.getState() +
+ " CompletedMaps:" + job.getCompletedMaps() +
+ " CompletedReduces:" + job.getCompletedReduces() +
+ " Memory(total-free)(KB):" + mem/1024 +
+ " ElapsedTime(ms):" + (currentTime - startTime));
+ }
+
+  //Throttles the maximum number of concurrently running tasks.
+  //This affects the memory requirement, since an
+  //org.apache.hadoop.mapred.MapTask/ReduceTask is loaded in memory for every
+  //running task and discarded once the task is launched.
+ static class ThrottledMRApp extends MRApp {
+
+ int maxConcurrentRunningTasks;
+ volatile int concurrentRunningTasks;
+ ThrottledMRApp(int maps, int reduces, int maxConcurrentRunningTasks) {
+ super(maps, reduces, true, "ThrottledMRApp", true);
+ this.maxConcurrentRunningTasks = maxConcurrentRunningTasks;
+ }
+
+ @Override
+ protected void attemptLaunched(TaskAttemptId attemptID) {
+ super.attemptLaunched(attemptID);
+ //the task is launched and sends done immediately
+ concurrentRunningTasks--;
+ }
+
+ @Override
+ protected ContainerAllocator createContainerAllocator(
+ ClientService clientService, AppContext context, boolean isLocal) {
+ return new ThrottledContainerAllocator();
+ }
+
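+    // Queues allocation events and serves them from a background thread,
+    // but only while concurrentRunningTasks is below the configured cap.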
+ class ThrottledContainerAllocator extends AbstractService
+ implements ContainerAllocator {
+ private int containerCount;
+ private Thread thread;
+ private BlockingQueue<ContainerAllocatorEvent> eventQueue =
+ new LinkedBlockingQueue<ContainerAllocatorEvent>();
+ public ThrottledContainerAllocator() {
+ super("ThrottledContainerAllocator");
+ }
+ @Override
+ public void handle(ContainerAllocatorEvent event) {
+ try {
+ eventQueue.put(event);
+ } catch (InterruptedException e) {
+ throw new YarnException(e);
+ }
+ }
+ @Override
+ public void start() {
+ thread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ ContainerAllocatorEvent event = null;
+ while (!Thread.currentThread().isInterrupted()) {
+ try {
+ if (concurrentRunningTasks < maxConcurrentRunningTasks) {
+ event = eventQueue.take();
+ ContainerId cId = recordFactory.newRecordInstance(ContainerId.class);
+ cId.setAppId(getContext().getApplicationID());
+ cId.setId(containerCount++);
+ //System.out.println("Allocating " + containerCount);
+
+ Container container = recordFactory.newRecordInstance(Container.class);
+ container.setId(cId);
+ NodeId nodeId = recordFactory.newRecordInstance(NodeId.class);
+ nodeId.setHost("dummy");
+ nodeId.setPort(1234);
+ container.setNodeId(nodeId);
+ container.setContainerToken(null);
+ container.setNodeHttpAddress("localhost:9999");
+ getContext().getEventHandler()
+ .handle(
+ new TaskAttemptContainerAssignedEvent(event
+ .getAttemptID(), container));
+ concurrentRunningTasks++;
+ } else {
+ Thread.sleep(1000);
+ }
+ } catch (InterruptedException e) {
+ System.out.println("Returning, interrupted");
+ return;
+ }
+ }
+ }
+ });
+ thread.start();
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ thread.interrupt();
+ super.stop();
+ }
+ }
+ }
+
+ public void benchmark1() throws Exception {
+ int maps = 900;
+ int reduces = 100;
+ System.out.println("Running benchmark with maps:"+maps +
+ " reduces:"+reduces);
+ run(new MRApp(maps, reduces, true, this.getClass().getName(), true));
+ }
+
+ public void benchmark2() throws Exception {
+ int maps = 4000;
+ int reduces = 1000;
+ int maxConcurrentRunningTasks = 500;
+
+ System.out.println("Running benchmark with throttled running tasks with " +
+ "maxConcurrentRunningTasks:" + maxConcurrentRunningTasks +
+ " maps:" + maps + " reduces:" + reduces);
+ run(new ThrottledMRApp(maps, reduces, maxConcurrentRunningTasks));
+ }
+
+ public static void main(String[] args) throws Exception {
+ MRAppBenchmark benchmark = new MRAppBenchmark();
+ benchmark.benchmark1();
+ benchmark.benchmark2();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
new file mode 100644
index 0000000..dc24373
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
@@ -0,0 +1,453 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.mapreduce.FileSystemCounter;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.TaskCounter;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.MockApps;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.Records;
+
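+// Builds deterministic mock Job/Task/TaskAttempt records; field values cycle
+// through the iterators below so repeated calls stay predictable.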
+public class MockJobs extends MockApps {
+ static final Iterator<JobState> JOB_STATES = Iterators.cycle(
+ JobState.values());
+ static final Iterator<TaskState> TASK_STATES = Iterators.cycle(
+ TaskState.values());
+ static final Iterator<TaskAttemptState> TASK_ATTEMPT_STATES = Iterators.cycle(
+ TaskAttemptState.values());
+ static final Iterator<TaskType> TASK_TYPES = Iterators.cycle(
+ TaskType.values());
+ static final Iterator<JobCounter> JOB_COUNTERS = Iterators.cycle(
+ JobCounter.values());
+ static final Iterator<FileSystemCounter> FS_COUNTERS = Iterators.cycle(
+ FileSystemCounter.values());
+ static final Iterator<TaskCounter> TASK_COUNTERS = Iterators.cycle(
+ TaskCounter.values());
+ static final Iterator<String> FS_SCHEMES = Iterators.cycle("FILE", "HDFS",
+ "LAFS", "CEPH");
+ static final Iterator<String> USER_COUNTER_GROUPS = Iterators.cycle(
+ "com.company.project.subproject.component.subcomponent.UserDefinedSpecificSpecialTask$Counters",
+ "PigCounters");
+ static final Iterator<String> USER_COUNTERS = Iterators.cycle(
+ "counter1", "counter2", "counter3");
+ static final Iterator<Phase> PHASES = Iterators.cycle(Phase.values());
+ static final Iterator<String> DIAGS = Iterators.cycle(
+ "Error: java.lang.OutOfMemoryError: Java heap space",
+ "Lost task tracker: tasktracker.domain/127.0.0.1:40879");
+
+ static final int DT = 1000000; // ms
+
+ public static String newJobName() {
+ return newAppName();
+ }
+
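+  // Example (a sketch): one application with a single job of 4 tasks,
+  // 2 attempts per task:
+  //   Map<JobId, Job> jobs = MockJobs.newJobs(appId, 1, 4, 2);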
+ public static Map<JobId, Job> newJobs(ApplicationId appID, int numJobsPerApp,
+ int numTasksPerJob,
+ int numAttemptsPerTask) {
+ Map<JobId, Job> map = Maps.newHashMap();
+ for (int j = 0; j < numJobsPerApp; ++j) {
+ Job job = newJob(appID, j, numTasksPerJob, numAttemptsPerTask);
+ map.put(job.getID(), job);
+ }
+ return map;
+ }
+
+ public static JobId newJobID(ApplicationId appID, int i) {
+ JobId id = Records.newRecord(JobId.class);
+ id.setAppId(appID);
+ id.setId(i);
+ return id;
+ }
+
+ public static JobReport newJobReport(JobId id) {
+ JobReport report = Records.newRecord(JobReport.class);
+ report.setJobId(id);
+ report.setStartTime(System.currentTimeMillis() - (int)(Math.random() * DT));
+ report.setFinishTime(System.currentTimeMillis() + (int)(Math.random() * DT) + 1);
+ report.setMapProgress((float)Math.random());
+ report.setReduceProgress((float)Math.random());
+ report.setJobState(JOB_STATES.next());
+ return report;
+ }
+
+ public static TaskReport newTaskReport(TaskId id) {
+ TaskReport report = Records.newRecord(TaskReport.class);
+ report.setTaskId(id);
+ report.setStartTime(System.currentTimeMillis() - (int)(Math.random() * DT));
+ report.setFinishTime(System.currentTimeMillis() + (int)(Math.random() * DT) + 1);
+ report.setProgress((float)Math.random());
+ report.setCounters(newCounters());
+ report.setTaskState(TASK_STATES.next());
+ return report;
+ }
+
+ public static TaskAttemptReport newTaskAttemptReport(TaskAttemptId id) {
+ TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
+ report.setTaskAttemptId(id);
+ report.setStartTime(System.currentTimeMillis() - (int)(Math.random() * DT));
+ report.setFinishTime(System.currentTimeMillis() + (int)(Math.random() * DT) + 1);
+ report.setPhase(PHASES.next());
+ report.setTaskAttemptState(TASK_ATTEMPT_STATES.next());
+ report.setProgress((float)Math.random());
+ report.setCounters(newCounters());
+ return report;
+ }
+
+ @SuppressWarnings("deprecation")
+ public static Counters newCounters() {
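+    // Fill the deprecated mapred Counters with random values, then convert
+    // them to the YARN record type below.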
+ org.apache.hadoop.mapred.Counters hc =
+ new org.apache.hadoop.mapred.Counters();
+ for (JobCounter c : JobCounter.values()) {
+ hc.findCounter(c).setValue((long)(Math.random() * 1000));
+ }
+ for (TaskCounter c : TaskCounter.values()) {
+ hc.findCounter(c).setValue((long)(Math.random() * 1000));
+ }
+ int nc = FileSystemCounter.values().length * 4;
+ for (int i = 0; i < nc; ++i) {
+ for (FileSystemCounter c : FileSystemCounter.values()) {
+ hc.findCounter(FS_SCHEMES.next(), c).
+ setValue((long)(Math.random() * DT));
+ }
+ }
+ for (int i = 0; i < 2 * 3; ++i) {
+ hc.findCounter(USER_COUNTER_GROUPS.next(), USER_COUNTERS.next()).
+ setValue((long)(Math.random() * 100000));
+ }
+ return TypeConverter.toYarn(hc);
+ }
+
+ public static Map<TaskAttemptId, TaskAttempt> newTaskAttempts(TaskId tid,
+ int m) {
+ Map<TaskAttemptId, TaskAttempt> map = Maps.newHashMap();
+ for (int i = 0; i < m; ++i) {
+ TaskAttempt ta = newTaskAttempt(tid, i);
+ map.put(ta.getID(), ta);
+ }
+ return map;
+ }
+
+ public static TaskAttempt newTaskAttempt(TaskId tid, int i) {
+ final TaskAttemptId taid = Records.newRecord(TaskAttemptId.class);
+ taid.setTaskId(tid);
+ taid.setId(i);
+ final TaskAttemptReport report = newTaskAttemptReport(taid);
+ final List<String> diags = Lists.newArrayList();
+ diags.add(DIAGS.next());
+ return new TaskAttempt() {
+ @Override
+ public TaskAttemptId getID() {
+ return taid;
+ }
+
+ @Override
+ public TaskAttemptReport getReport() {
+ return report;
+ }
+
+ @Override
+ public long getLaunchTime() {
+ return 0;
+ }
+
+ @Override
+ public long getFinishTime() {
+ return 0;
+ }
+
+ @Override
+ public Counters getCounters() {
+ return report.getCounters();
+ }
+
+ @Override
+ public float getProgress() {
+ return report.getProgress();
+ }
+
+ @Override
+ public TaskAttemptState getState() {
+ return report.getTaskAttemptState();
+ }
+
+ @Override
+ public boolean isFinished() {
+ switch (report.getTaskAttemptState()) {
+ case SUCCEEDED:
+ case FAILED:
+ case KILLED: return true;
+ }
+ return false;
+ }
+
+ @Override
+ public ContainerId getAssignedContainerID() {
+ ContainerId id = Records.newRecord(ContainerId.class);
+ id.setAppId(taid.getTaskId().getJobId().getAppId());
+ return id;
+ }
+
+ @Override
+ public String getNodeHttpAddress() {
+ return "localhost:9999";
+ }
+
+ @Override
+ public List<String> getDiagnostics() {
+ return diags;
+ }
+
+ @Override
+ public String getAssignedContainerMgrAddress() {
+ return "localhost:9998";
+ }
+ };
+ }
+
+ public static Map<TaskId, Task> newTasks(JobId jid, int n, int m) {
+ Map<TaskId, Task> map = Maps.newHashMap();
+ for (int i = 0; i < n; ++i) {
+ Task task = newTask(jid, i, m);
+ map.put(task.getID(), task);
+ }
+ return map;
+ }
+
+ public static Task newTask(JobId jid, int i, int m) {
+ final TaskId tid = Records.newRecord(TaskId.class);
+ tid.setJobId(jid);
+ tid.setId(i);
+ tid.setTaskType(TASK_TYPES.next());
+ final TaskReport report = newTaskReport(tid);
+ final Map<TaskAttemptId, TaskAttempt> attempts = newTaskAttempts(tid, m);
+ return new Task() {
+ @Override
+ public TaskId getID() {
+ return tid;
+ }
+
+ @Override
+ public TaskReport getReport() {
+ return report;
+ }
+
+ @Override
+ public Counters getCounters() {
+ return report.getCounters();
+ }
+
+ @Override
+ public float getProgress() {
+ return report.getProgress();
+ }
+
+ @Override
+ public TaskType getType() {
+ return tid.getTaskType();
+ }
+
+ @Override
+ public Map<TaskAttemptId, TaskAttempt> getAttempts() {
+ return attempts;
+ }
+
+ @Override
+ public TaskAttempt getAttempt(TaskAttemptId attemptID) {
+ return attempts.get(attemptID);
+ }
+
+ @Override
+ public boolean isFinished() {
+ switch (report.getTaskState()) {
+ case SUCCEEDED:
+ case KILLED:
+ case FAILED: return true;
+ }
+ return false;
+ }
+
+ @Override
+ public boolean canCommit(TaskAttemptId taskAttemptID) {
+ return false;
+ }
+
+ @Override
+ public TaskState getState() {
+ return report.getTaskState();
+ }
+ };
+ }
+
+ public static Counters getCounters(Collection<Task> tasks) {
+ Counters counters = JobImpl.newCounters();
+ return JobImpl.incrTaskCounters(counters, tasks);
+ }
+
+ static class TaskCount {
+ int maps;
+ int reduces;
+ int completedMaps;
+ int completedReduces;
+
+ void incr(Task task) {
+ TaskType type = task.getType();
+ boolean finished = task.isFinished();
+ if (type == TaskType.MAP) {
+ if (finished) {
+ ++completedMaps;
+ }
+ ++maps;
+ } else if (type == TaskType.REDUCE) {
+ if (finished) {
+ ++completedReduces;
+ }
+ ++reduces;
+ }
+ }
+ }
+
+ static TaskCount getTaskCount(Collection<Task> tasks) {
+ TaskCount tc = new TaskCount();
+ for (Task task : tasks) {
+ tc.incr(task);
+ }
+ return tc;
+ }
+
+ public static Job newJob(ApplicationId appID, int i, int n, int m) {
+ final JobId id = newJobID(appID, i);
+ final String name = newJobName();
+ final JobReport report = newJobReport(id);
+ final Map<TaskId, Task> tasks = newTasks(id, n, m);
+ final TaskCount taskCount = getTaskCount(tasks.values());
+ final Counters counters = getCounters(tasks.values());
+ return new Job() {
+ @Override
+ public JobId getID() {
+ return id;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public JobState getState() {
+ return report.getJobState();
+ }
+
+ @Override
+ public JobReport getReport() {
+ return report;
+ }
+
+ @Override
+ public Counters getCounters() {
+ return counters;
+ }
+
+ @Override
+ public Map<TaskId, Task> getTasks() {
+ return tasks;
+ }
+
+ @Override
+ public Task getTask(TaskId taskID) {
+ return tasks.get(taskID);
+ }
+
+ @Override
+ public int getTotalMaps() {
+ return taskCount.maps;
+ }
+
+ @Override
+ public int getTotalReduces() {
+ return taskCount.reduces;
+ }
+
+ @Override
+ public int getCompletedMaps() {
+ return taskCount.completedMaps;
+ }
+
+ @Override
+ public int getCompletedReduces() {
+ return taskCount.completedReduces;
+ }
+
+ @Override
+ public boolean isUber() {
+ return false;
+ }
+
+ @Override
+ public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
+ int fromEventId, int maxEvents) {
+ return null;
+ }
+
+ @Override
+ public Map<TaskId, Task> getTasks(TaskType taskType) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public List<String> getDiagnostics() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public boolean checkAccess(UserGroupInformation callerUGI,
+ JobACL jobOperation) {
+ return true;
+ }
+ };
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
new file mode 100644
index 0000000..4744035
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
@@ -0,0 +1,236 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.util.Iterator;
+import java.util.Map;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.TaskAttemptListenerImpl;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.junit.Test;
+
+/**
+ * Tests the state machine with respect to Job/Task/TaskAttempt failure
+ * scenarios.
+ */
+public class TestFail {
+
+ @Test
+  //The first attempt fails and the second attempt passes.
+  //The job succeeds.
+ public void testFailTask() throws Exception {
+ MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
+ Configuration conf = new Configuration();
+ // this test requires two task attempts, but uberization overrides max to 1
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.SUCCEEDED);
+ Map<TaskId,Task> tasks = job.getTasks();
+ Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
+ Task task = tasks.values().iterator().next();
+ Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
+ task.getReport().getTaskState());
+ Map<TaskAttemptId, TaskAttempt> attempts =
+ tasks.values().iterator().next().getAttempts();
+ Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
+    //one attempt must have failed
+    //and the other must have succeeded
+ Iterator<TaskAttempt> it = attempts.values().iterator();
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
+ it.next().getReport().getTaskAttemptState());
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
+ it.next().getReport().getTaskAttemptState());
+ }
+
+ @Test
+ public void testMapFailureMaxPercent() throws Exception {
+ MRApp app = new MockFirstFailingTaskMRApp(4, 0);
+ Configuration conf = new Configuration();
+
+    //reduce the number of attempts so the test runs faster
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 2);
+ conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
+
+ conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 20);
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.FAILED);
+
+    //setting the failure percentage to 25% (1 failed map out of 4 is 25%)
+    //will make the Job successful
+ app = new MockFirstFailingTaskMRApp(4, 0);
+ conf = new Configuration();
+
+    //reduce the number of attempts so the test runs faster
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 2);
+ conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
+
+ conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 25);
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
+ job = app.submit(conf);
+ app.waitForState(job, JobState.SUCCEEDED);
+ }
+
+ @Test
+ public void testReduceFailureMaxPercent() throws Exception {
+ MRApp app = new MockFirstFailingTaskMRApp(2, 4);
+ Configuration conf = new Configuration();
+
+    //reduce the number of attempts so the test runs faster
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
+ conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 2);
+
+ conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 50);//no failure due to Map
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
+ conf.setInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 20);
+ conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.FAILED);
+
+    //setting the failure percentage to 25% (1 failed reduce out of 4 is 25%)
+    //will make the Job successful
+ app = new MockFirstFailingTaskMRApp(2, 4);
+ conf = new Configuration();
+
+    //reduce the number of attempts so the test runs faster
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
+ conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 2);
+
+ conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 50);//no failure due to Map
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
+ conf.setInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 25);
+ conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
+ job = app.submit(conf);
+ app.waitForState(job, JobState.SUCCEEDED);
+ }
+
+ @Test
+  //All task attempts time out, leading to Job failure
+ public void testTimedOutTask() throws Exception {
+ MRApp app = new TimeOutTaskMRApp(1, 0);
+ Configuration conf = new Configuration();
+ int maxAttempts = 2;
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
+ // disable uberization (requires entire job to be reattempted, so max for
+ // subtask attempts is overridden to 1)
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.FAILED);
+ Map<TaskId,Task> tasks = job.getTasks();
+ Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
+ Task task = tasks.values().iterator().next();
+ Assert.assertEquals("Task state not correct", TaskState.FAILED,
+ task.getReport().getTaskState());
+ Map<TaskAttemptId, TaskAttempt> attempts =
+ tasks.values().iterator().next().getAttempts();
+ Assert.assertEquals("Num attempts is not correct", maxAttempts,
+ attempts.size());
+ for (TaskAttempt attempt : attempts.values()) {
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
+ attempt.getReport().getTaskAttemptState());
+ }
+ }
+
+ static class TimeOutTaskMRApp extends MRApp {
+ TimeOutTaskMRApp(int maps, int reduces) {
+ super(maps, reduces, false, "TimeOutTaskMRApp", true);
+ }
+ @Override
+ protected TaskAttemptListener createTaskAttemptListener(AppContext context) {
+      //This creates the TaskAttemptListener with the TaskHeartbeatHandler.
+      //RPC servers are not started and the task timeout is reduced;
+      //when an attempt times out, the heartbeat handler sends the lost
+      //event, leading to attempt failure.
+ return new TaskAttemptListenerImpl(getContext(), null) {
+        public void startRpcServer() {}
+        public void stopRpcServer() {}
+ public void init(Configuration conf) {
+ conf.setInt("mapreduce.task.timeout", 1*1000);//reduce timeout
+ super.init(conf);
+ }
+ };
+ }
+ }
+
+ //Attempts of first Task are failed
+ static class MockFirstFailingTaskMRApp extends MRApp {
+
+ MockFirstFailingTaskMRApp(int maps, int reduces) {
+ super(maps, reduces, true, "MockFirstFailingTaskMRApp", true);
+ }
+
+ @Override
+ protected void attemptLaunched(TaskAttemptId attemptID) {
+ if (attemptID.getTaskId().getId() == 0) {//check if it is first task
+ // send the Fail event
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_FAILMSG));
+ } else {
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_DONE));
+ }
+ }
+ }
+
+ //First attempt is failed
+ static class MockFirstFailingAttemptMRApp extends MRApp {
+ MockFirstFailingAttemptMRApp(int maps, int reduces) {
+ super(maps, reduces, true, "MockFirstFailingAttemptMRApp", true);
+ }
+
+ @Override
+ protected void attemptLaunched(TaskAttemptId attemptID) {
+ if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
+ //check if it is first task's first attempt
+ // send the Fail event
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_FAILMSG));
+ } else {
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_DONE));
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestFail t = new TestFail();
+ t.testFailTask();
+ t.testTimedOutTask();
+ t.testMapFailureMaxPercent();
+ t.testReduceFailureMaxPercent();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
new file mode 100644
index 0000000..827e727
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
@@ -0,0 +1,153 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.util.Arrays;
+import java.util.Iterator;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.junit.Test;
+
+public class TestFetchFailure {
+
+ @Test
+ public void testFetchFailure() throws Exception {
+ MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true);
+ Configuration conf = new Configuration();
+    // map -> reduce -> fetch-failure -> map retry is incompatible with the
+    // sequential, single-task-attempt approach of the uber-AM, so disable it:
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.RUNNING);
+ //all maps would be running
+ Assert.assertEquals("Num tasks not correct",
+ 2, job.getTasks().size());
+ Iterator<Task> it = job.getTasks().values().iterator();
+ Task mapTask = it.next();
+ Task reduceTask = it.next();
+
+ //wait for Task state move to RUNNING
+ app.waitForState(mapTask, TaskState.RUNNING);
+ TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
+ app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
+
+ //send the done signal to the map attempt
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(mapAttempt1.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ // wait for map success
+ app.waitForState(mapTask, TaskState.SUCCEEDED);
+
+ TaskAttemptCompletionEvent[] events =
+ job.getTaskAttemptCompletionEvents(0, 100);
+ Assert.assertEquals("Num completion events not correct",
+ 1, events.length);
+ Assert.assertEquals("Event status not correct",
+ TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
+
+ // wait for reduce to start running
+ app.waitForState(reduceTask, TaskState.RUNNING);
+ TaskAttempt reduceAttempt =
+ reduceTask.getAttempts().values().iterator().next();
+ app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
+
+    //send 3 fetch failures from the reduce to trigger map re-execution
+ sendFetchFailure(app, reduceAttempt, mapAttempt1);
+ sendFetchFailure(app, reduceAttempt, mapAttempt1);
+ sendFetchFailure(app, reduceAttempt, mapAttempt1);
+
+ //wait for map Task state move back to RUNNING
+ app.waitForState(mapTask, TaskState.RUNNING);
+
+ //map attempt must have become FAILED
+ Assert.assertEquals("Map TaskAttempt state not correct",
+ TaskAttemptState.FAILED, mapAttempt1.getState());
+
+ Assert.assertEquals("Num attempts in Map Task not correct",
+ 2, mapTask.getAttempts().size());
+
+ Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
+ atIt.next();
+ TaskAttempt mapAttempt2 = atIt.next();
+
+ app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
+ //send the done signal to the second map attempt
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(mapAttempt2.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ // wait for map success
+ app.waitForState(mapTask, TaskState.SUCCEEDED);
+
+ //send done to reduce
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(reduceAttempt.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ app.waitForState(job, JobState.SUCCEEDED);
+
+ //previous completion event now becomes obsolete
+ Assert.assertEquals("Event status not correct",
+ TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
+
+ events = job.getTaskAttemptCompletionEvents(0, 100);
+ Assert.assertEquals("Num completion events not correct",
+ 4, events.length);
+ Assert.assertEquals("Event map attempt id not correct",
+ mapAttempt1.getID(), events[0].getAttemptId());
+ Assert.assertEquals("Event map attempt id not correct",
+ mapAttempt1.getID(), events[1].getAttemptId());
+ Assert.assertEquals("Event map attempt id not correct",
+ mapAttempt2.getID(), events[2].getAttemptId());
+ Assert.assertEquals("Event redude attempt id not correct",
+ reduceAttempt.getID(), events[3].getAttemptId());
+ Assert.assertEquals("Event status not correct for map attempt1",
+ TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
+ Assert.assertEquals("Event status not correct for map attempt1",
+ TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
+ Assert.assertEquals("Event status not correct for map attempt2",
+ TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
+ Assert.assertEquals("Event status not correct for reduce attempt1",
+ TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
+ }
+
+ private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt,
+ TaskAttempt mapAttempt) {
+ app.getContext().getEventHandler().handle(
+ new JobTaskAttemptFetchFailureEvent(
+ reduceAttempt.getID(),
+ Arrays.asList(new TaskAttemptId[] {mapAttempt.getID()})));
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
new file mode 100644
index 0000000..3533c28
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
@@ -0,0 +1,221 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.junit.Test;
+
+/**
+ * Tests the state machine with respect to Job/Task/TaskAttempt kill scenarios.
+ *
+ */
+public class TestKill {
+
+ @Test
+ public void testKillJob() throws Exception {
+ final CountDownLatch latch = new CountDownLatch(1);
+
+ MRApp app = new BlockingMRApp(1, 0, latch);
+ //this will start the job, but the job won't complete because the
+ //task is blocked
+ Job job = app.submit(new Configuration());
+
+ //wait and validate for Job to become RUNNING
+ app.waitForState(job, JobState.RUNNING);
+
+ //send the kill signal to Job
+ app.getContext().getEventHandler().handle(
+ new JobEvent(job.getID(), JobEventType.JOB_KILL));
+
+ //unblock Task
+ latch.countDown();
+
+ //wait and validate for Job to be KILLED
+ app.waitForState(job, JobState.KILLED);
+ Map<TaskId,Task> tasks = job.getTasks();
+ Assert.assertEquals("No of tasks is not correct", 1,
+ tasks.size());
+ Task task = tasks.values().iterator().next();
+ Assert.assertEquals("Task state not correct", TaskState.KILLED,
+ task.getReport().getTaskState());
+ Map<TaskAttemptId, TaskAttempt> attempts =
+ tasks.values().iterator().next().getAttempts();
+ Assert.assertEquals("No of attempts is not correct", 1,
+ attempts.size());
+ Iterator<TaskAttempt> it = attempts.values().iterator();
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
+ it.next().getReport().getTaskAttemptState());
+ }
+
+ @Test
+ public void testKillTask() throws Exception {
+ final CountDownLatch latch = new CountDownLatch(1);
+ MRApp app = new BlockingMRApp(2, 0, latch);
+ //this will start the job, but the job won't complete because the Task is blocked
+ Job job = app.submit(new Configuration());
+
+ //wait and validate for Job to become RUNNING
+ app.waitForState(job, JobState.RUNNING);
+ Map<TaskId,Task> tasks = job.getTasks();
+ Assert.assertEquals("No of tasks is not correct", 2,
+ tasks.size());
+ Iterator<Task> it = tasks.values().iterator();
+ Task task1 = it.next();
+ Task task2 = it.next();
+
+ //send the kill signal to the first Task
+ app.getContext().getEventHandler().handle(
+ new TaskEvent(task1.getID(), TaskEventType.T_KILL));
+
+ //unblock Task
+ latch.countDown();
+
+ //wait and validate for Job to become SUCCEEDED
+ app.waitForState(job, JobState.SUCCEEDED);
+
+ //first Task is killed and the second succeeds;
+ //the Job succeeds
+
+ Assert.assertEquals("Task state not correct", TaskState.KILLED,
+ task1.getReport().getTaskState());
+ Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
+ task2.getReport().getTaskState());
+ Map<TaskAttemptId, TaskAttempt> attempts = task1.getAttempts();
+ Assert.assertEquals("No of attempts is not correct", 1,
+ attempts.size());
+ Iterator<TaskAttempt> iter = attempts.values().iterator();
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
+ iter.next().getReport().getTaskAttemptState());
+
+ attempts = task2.getAttempts();
+ Assert.assertEquals("No of attempts is not correct", 1,
+ attempts.size());
+ iter = attempts.values().iterator();
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
+ iter.next().getReport().getTaskAttemptState());
+ }
+
+ @Test
+ public void testKillTaskAttempt() throws Exception {
+ final CountDownLatch latch = new CountDownLatch(1);
+ MRApp app = new BlockingMRApp(2, 0, latch);
+ //this will start the job, but the job won't complete because the Task is blocked
+ Job job = app.submit(new Configuration());
+
+ //wait and validate for Job to become RUNNING
+ app.waitForState(job, JobState.RUNNING);
+ Map<TaskId,Task> tasks = job.getTasks();
+ Assert.assertEquals("No of tasks is not correct", 2,
+ tasks.size());
+ Iterator<Task> it = tasks.values().iterator();
+ Task task1 = it.next();
+ Task task2 = it.next();
+
+ //wait for tasks to be scheduled
+ app.waitForState(task1, TaskState.SCHEDULED);
+ app.waitForState(task2, TaskState.SCHEDULED);
+
+ //send the kill signal to the first Task's attempt
+ TaskAttempt attempt = task1.getAttempts().values().iterator().next();
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_KILL));
+
+ //unblock
+ latch.countDown();
+
+ //wait and validate for Job to become SUCCEEDED
+ //job will still succeed
+ app.waitForState(job, JobState.SUCCEEDED);
+
+ //first Task will have two attempts: the 1st is killed, the 2nd succeeds;
+ //both Tasks and the Job succeed
+ Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
+ task1.getReport().getTaskState());
+ Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
+ task2.getReport().getTaskState());
+
+ Map<TaskAttemptId, TaskAttempt> attempts = task1.getAttempts();
+ Assert.assertEquals("No of attempts is not correct", 2,
+ attempts.size());
+ Iterator<TaskAttempt> iter = attempts.values().iterator();
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
+ iter.next().getReport().getTaskAttemptState());
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
+ iter.next().getReport().getTaskAttemptState());
+
+ attempts = task2.getAttempts();
+ Assert.assertEquals("No of attempts is not correct", 1,
+ attempts.size());
+ iter = attempts.values().iterator();
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
+ iter.next().getReport().getTaskAttemptState());
+ }
+
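+ /**
+ * MRApp variant that blocks the first attempt of the first task on a
+ * latch and auto-completes every other attempt, so a test can hold the
+ * job in RUNNING, fire kill events, and then release the latch.
+ */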
+ static class BlockingMRApp extends MRApp {
+ private CountDownLatch latch;
+ BlockingMRApp(int maps, int reduces, CountDownLatch latch) {
+ super(maps, reduces, true, "testKill", true);
+ this.latch = latch;
+ }
+
+ @Override
+ protected void attemptLaunched(TaskAttemptId attemptID) {
+ if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
+ //this blocks the first task's first attempt
+ //the subsequent ones are completed
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ } else {
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_DONE));
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestKill t = new TestKill();
+ t.testKillJob();
+ t.testKillTask();
+ t.testKillTaskAttempt();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
new file mode 100644
index 0000000..c64335a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
@@ -0,0 +1,201 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.util.Iterator;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.junit.Test;
+
+/**
+ * Tests the state machine of MR App.
+ */
+public class TestMRApp {
+
+ @Test
+ public void testMapReduce() throws Exception {
+ MRApp app = new MRApp(2, 2, true, this.getClass().getName(), true);
+ Job job = app.submit(new Configuration());
+ app.waitForState(job, JobState.SUCCEEDED);
+ app.verifyCompleted();
+ }
+
+ @Test
+ public void testZeroMaps() throws Exception {
+ MRApp app = new MRApp(0, 1, true, this.getClass().getName(), true);
+ Job job = app.submit(new Configuration());
+ app.waitForState(job, JobState.SUCCEEDED);
+ app.verifyCompleted();
+ }
+
+ @Test
+ public void testZeroMapReduces() throws Exception{
+ MRApp app = new MRApp(0, 0, true, this.getClass().getName(), true);
+ Job job = app.submit(new Configuration());
+ app.waitForState(job, JobState.SUCCEEDED);
+ }
+
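+ /**
+ * Walks the commit-pending handshake: the attempt reports
+ * TA_COMMIT_PENDING, the state machine parks it in COMMIT_PENDING, and a
+ * later TA_DONE moves the task, and then the job, to SUCCEEDED.
+ */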
+ @Test
+ public void testCommitPending() throws Exception {
+ MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
+ Job job = app.submit(new Configuration());
+ app.waitForState(job, JobState.RUNNING);
+ Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
+ Iterator<Task> it = job.getTasks().values().iterator();
+ Task task = it.next();
+ app.waitForState(task, TaskState.RUNNING);
+ TaskAttempt attempt = task.getAttempts().values().iterator().next();
+ app.waitForState(attempt, TaskAttemptState.RUNNING);
+
+ //send the commit pending signal to the task
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ attempt.getID(),
+ TaskAttemptEventType.TA_COMMIT_PENDING));
+
+ //wait for first attempt to commit pending
+ app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);
+
+ //send the done signal to the task
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ task.getAttempts().values().iterator().next().getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ app.waitForState(job, JobState.SUCCEEDED);
+ }
+
+ //@Test
+ public void testCompletedMapsForReduceSlowstart() throws Exception {
+ MRApp app = new MRApp(2, 1, false, this.getClass().getName(), true);
+ Configuration conf = new Configuration();
+ //after half of the map completion, reduce will start
+ conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.5f);
+ //uberization forces full slowstart (1.0), so disable that
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.RUNNING);
+ //all maps would be running
+ Assert.assertEquals("Num tasks not correct", 3, job.getTasks().size());
+ Iterator<Task> it = job.getTasks().values().iterator();
+ Task mapTask1 = it.next();
+ Task mapTask2 = it.next();
+ Task reduceTask = it.next();
+
+ // all maps must be running
+ app.waitForState(mapTask1, TaskState.RUNNING);
+ app.waitForState(mapTask2, TaskState.RUNNING);
+
+ TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator().next();
+ TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
+
+ //before sending the TA_DONE event, make sure the attempt has come to
+ //the RUNNING state
+ app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
+ app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
+
+ // reduces must be in NEW state
+ Assert.assertEquals("Reduce Task state not correct",
+ TaskState.NEW, reduceTask.getReport().getTaskState());
+
+ //send the done signal to the 1st map task
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ mapTask1.getAttempts().values().iterator().next().getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ //wait for first map task to complete
+ app.waitForState(mapTask1, TaskState.SUCCEEDED);
+
+ //Once the first map completes, it will schedule the reduces
+ //now reduce must be running
+ app.waitForState(reduceTask, TaskState.RUNNING);
+
+ //send the done signal to 2nd map and the reduce to complete the job
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ mapTask2.getAttempts().values().iterator().next().getID(),
+ TaskAttemptEventType.TA_DONE));
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ reduceTask.getAttempts().values().iterator().next().getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ app.waitForState(job, JobState.SUCCEEDED);
+ }
+
+ @Test
+ public void testJobError() throws Exception {
+ MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
+ Job job = app.submit(new Configuration());
+ app.waitForState(job, JobState.RUNNING);
+ Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
+ Iterator<Task> it = job.getTasks().values().iterator();
+ Task task = it.next();
+ app.waitForState(task, TaskState.RUNNING);
+
+ //send an invalid event on task at current state
+ app.getContext().getEventHandler().handle(
+ new TaskEvent(
+ task.getID(), TaskEventType.T_SCHEDULE));
+
+ //this must lead to job error
+ app.waitForState(job, JobState.ERROR);
+ }
+
+ @Test
+ public void checkJobStateTypeConversion() {
+ //verify that all states can be converted without
+ // throwing an exception
+ for (JobState state : JobState.values()) {
+ TypeConverter.fromYarn(state);
+ }
+ }
+
+ @Test
+ public void checkTaskStateTypeConversion() {
+ //verify that all states can be converted without
+ // throwing an exception
+ for (TaskState state : TaskState.values()) {
+ TypeConverter.fromYarn(state);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestMRApp t = new TestMRApp();
+ t.testMapReduce();
+ t.testCommitPending();
+ t.testCompletedMapsForReduceSlowstart();
+ t.testJobError();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java
new file mode 100644
index 0000000..c32f128
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java
@@ -0,0 +1,189 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.util.Iterator;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.mapreduce.v2.app.client.MRClientService;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.junit.Test;
+
+public class TestMRClientService {
+
+ private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
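+ /**
+ * Starts an app with a live MRClientService, pushes a diagnostic update
+ * and a status update through the event handler, and then drives each
+ * MRClientProtocol RPC against the service to check that the returned
+ * records are populated.
+ */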
+ @Test
+ public void test() throws Exception {
+ MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
+ Configuration conf = new Configuration();
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.RUNNING);
+ Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
+ Iterator<Task> it = job.getTasks().values().iterator();
+ Task task = it.next();
+ app.waitForState(task, TaskState.RUNNING);
+ TaskAttempt attempt = task.getAttempts().values().iterator().next();
+ app.waitForState(attempt, TaskAttemptState.RUNNING);
+
+ // send the diagnostic
+ String diagnostic1 = "Diagnostic1";
+ String diagnostic2 = "Diagnostic2";
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptDiagnosticsUpdateEvent(attempt.getID(), diagnostic1));
+
+ // build the status update
+ TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
+ taskAttemptStatus.id = attempt.getID();
+ taskAttemptStatus.progress = 0.5f;
+ taskAttemptStatus.diagnosticInfo = diagnostic2;
+ taskAttemptStatus.stateString = "RUNNING";
+ taskAttemptStatus.taskState = TaskAttemptState.RUNNING;
+ taskAttemptStatus.phase = Phase.MAP;
+ taskAttemptStatus.outputSize = 3;
+ // send the status update
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptStatusUpdateEvent(attempt.getID(), taskAttemptStatus));
+
+
+ //verify that all objects are fully populated by invoking the RPCs.
+ YarnRPC rpc = YarnRPC.create(conf);
+ MRClientProtocol proxy =
+ (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
+ app.clientService.getBindAddress(), conf);
+ GetCountersRequest gcRequest =
+ recordFactory.newRecordInstance(GetCountersRequest.class);
+ gcRequest.setJobId(job.getID());
+ Assert.assertNotNull("Counters is null",
+ proxy.getCounters(gcRequest).getCounters());
+
+ GetJobReportRequest gjrRequest =
+ recordFactory.newRecordInstance(GetJobReportRequest.class);
+ gjrRequest.setJobId(job.getID());
+ Assert.assertNotNull("JobReport is null",
+ proxy.getJobReport(gjrRequest).getJobReport());
+
+ GetTaskAttemptCompletionEventsRequest gtaceRequest =
+ recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
+ gtaceRequest.setJobId(job.getID());
+ gtaceRequest.setFromEventId(0);
+ gtaceRequest.setMaxEvents(10);
+ Assert.assertNotNull("TaskCompletionEvents is null",
+ proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList());
+
+ GetDiagnosticsRequest gdRequest =
+ recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
+ gdRequest.setTaskAttemptId(attempt.getID());
+ Assert.assertNotNull("Diagnostics is null",
+ proxy.getDiagnostics(gdRequest).getDiagnosticsList());
+
+ GetTaskAttemptReportRequest gtarRequest =
+ recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
+ gtarRequest.setTaskAttemptId(attempt.getID());
+ Assert.assertNotNull("TaskAttemptReport is null",
+ proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport());
+
+ GetTaskReportRequest gtrRequest =
+ recordFactory.newRecordInstance(GetTaskReportRequest.class);
+ gtrRequest.setTaskId(task.getID());
+ Assert.assertNotNull("TaskReport is null",
+ proxy.getTaskReport(gtrRequest).getTaskReport());
+
+ GetTaskReportsRequest gtreportsRequest =
+ recordFactory.newRecordInstance(GetTaskReportsRequest.class);
+ gtreportsRequest.setJobId(job.getID());
+ gtreportsRequest.setTaskType(TaskType.MAP);
+ Assert.assertNotNull("TaskReports for map is null",
+ proxy.getTaskReports(gtreportsRequest).getTaskReportList());
+
+ gtreportsRequest =
+ recordFactory.newRecordInstance(GetTaskReportsRequest.class);
+ gtreportsRequest.setJobId(job.getID());
+ gtreportsRequest.setTaskType(TaskType.REDUCE);
+ Assert.assertNotNull("TaskReports for reduce is null",
+ proxy.getTaskReports(gtreportsRequest).getTaskReportList());
+
+ List<String> diag = proxy.getDiagnostics(gdRequest).getDiagnosticsList();
+ Assert.assertEquals("Num diagnostics not correct", 2 , diag.size());
+ Assert.assertEquals("Diag 1 not correct",
+ diagnostic1, diag.get(0).toString());
+ Assert.assertEquals("Diag 2 not correct",
+ diagnostic2, diag.get(1).toString());
+
+ TaskReport taskReport = proxy.getTaskReport(gtrRequest).getTaskReport();
+ Assert.assertEquals("Num diagnostics not correct", 2,
+ taskReport.getDiagnosticsCount());
+
+ //send the done signal to the task
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ task.getAttempts().values().iterator().next().getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ app.waitForState(job, JobState.SUCCEEDED);
+ }
+
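+ /**
+ * MRApp variant that swaps in a real MRClientService so the test can
+ * issue RPCs against its bind address.
+ */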
+ class MRAppWithClientService extends MRApp {
+ MRClientService clientService = null;
+ MRAppWithClientService(int maps, int reduces, boolean autoComplete) {
+ super(maps, reduces, autoComplete, "MRAppWithClientService", true);
+ }
+ @Override
+ protected ClientService createClientService(AppContext context) {
+ clientService = new MRClientService(context);
+ return clientService;
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestMRClientService t = new TestMRClientService();
+ t.test();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
new file mode 100644
index 0000000..cbf3ab0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
@@ -0,0 +1,506 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestRMContainerAllocator {
+// private static final Log LOG = LogFactory.getLog(TestRMContainerAllocator.class);
+// private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+//
+// @BeforeClass
+// public static void preTests() {
+// DefaultMetricsSystem.shutdown();
+// }
+//
+// @Test
+// public void testSimple() throws Exception {
+// FifoScheduler scheduler = createScheduler();
+// LocalRMContainerAllocator allocator = new LocalRMContainerAllocator(
+// scheduler, new Configuration());
+//
+// //add resources to scheduler
+// RMNode nodeManager1 = addNode(scheduler, "h1", 10240);
+// RMNode nodeManager2 = addNode(scheduler, "h2", 10240);
+// RMNode nodeManager3 = addNode(scheduler, "h3", 10240);
+//
+// //create the container request
+// ContainerRequestEvent event1 =
+// createReq(1, 1024, new String[]{"h1"});
+// allocator.sendRequest(event1);
+//
+// //send 1 more request with different resource req
+// ContainerRequestEvent event2 = createReq(2, 1024, new String[]{"h2"});
+// allocator.sendRequest(event2);
+//
+// //this tells the scheduler about the requests
+// //as nodes are not added, no allocations
+// List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+// Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+//
+// //send another request with different resource and priority
+// ContainerRequestEvent event3 = createReq(3, 1024, new String[]{"h3"});
+// allocator.sendRequest(event3);
+//
+// //this tells the scheduler about the requests
+// //as nodes are not added, no allocations
+// assigned = allocator.schedule();
+// Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+//
+// //update resources in scheduler
+// scheduler.nodeUpdate(nodeManager1); // Node heartbeat
+// scheduler.nodeUpdate(nodeManager2); // Node heartbeat
+// scheduler.nodeUpdate(nodeManager3); // Node heartbeat
+//
+//
+// assigned = allocator.schedule();
+// checkAssignments(
+// new ContainerRequestEvent[]{event1, event2, event3}, assigned, false);
+// }
+//
+// //TODO: Currently Scheduler seems to have bug where it does not work
+// //for Application asking for containers with different capabilities.
+// //@Test
+// public void testResource() throws Exception {
+// FifoScheduler scheduler = createScheduler();
+// LocalRMContainerAllocator allocator = new LocalRMContainerAllocator(
+// scheduler, new Configuration());
+//
+// //add resources to scheduler
+// RMNode nodeManager1 = addNode(scheduler, "h1", 10240);
+// RMNode nodeManager2 = addNode(scheduler, "h2", 10240);
+// RMNode nodeManager3 = addNode(scheduler, "h3", 10240);
+//
+// //create the container request
+// ContainerRequestEvent event1 =
+// createReq(1, 1024, new String[]{"h1"});
+// allocator.sendRequest(event1);
+//
+// //send 1 more request with different resource req
+// ContainerRequestEvent event2 = createReq(2, 2048, new String[]{"h2"});
+// allocator.sendRequest(event2);
+//
+// //this tells the scheduler about the requests
+// //as nodes are not added, no allocations
+// List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+// Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+//
+// //update resources in scheduler
+// scheduler.nodeUpdate(nodeManager1); // Node heartbeat
+// scheduler.nodeUpdate(nodeManager2); // Node heartbeat
+// scheduler.nodeUpdate(nodeManager3); // Node heartbeat
+//
+// assigned = allocator.schedule();
+// checkAssignments(
+// new ContainerRequestEvent[]{event1, event2}, assigned, false);
+// }
+//
+// @Test
+// public void testMapReduceScheduling() throws Exception {
+// FifoScheduler scheduler = createScheduler();
+// Configuration conf = new Configuration();
+// LocalRMContainerAllocator allocator = new LocalRMContainerAllocator(
+// scheduler, conf);
+//
+// //add resources to scheduler
+// RMNode nodeManager1 = addNode(scheduler, "h1", 1024);
+// RMNode nodeManager2 = addNode(scheduler, "h2", 10240);
+// RMNode nodeManager3 = addNode(scheduler, "h3", 10240);
+//
+// //create the container request
+// //send MAP request
+// ContainerRequestEvent event1 =
+// createReq(1, 2048, new String[]{"h1", "h2"}, true, false);
+// allocator.sendRequest(event1);
+//
+// //send REDUCE request
+// ContainerRequestEvent event2 = createReq(2, 3000, new String[]{"h1"}, false, true);
+// allocator.sendRequest(event2);
+//
+// //send MAP request
+// ContainerRequestEvent event3 = createReq(3, 2048, new String[]{"h3"}, false, false);
+// allocator.sendRequest(event3);
+//
+// //this tells the scheduler about the requests
+// //as nodes are not added, no allocations
+// List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+// Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+//
+// //update resources in scheduler
+// scheduler.nodeUpdate(nodeManager1); // Node heartbeat
+// scheduler.nodeUpdate(nodeManager2); // Node heartbeat
+// scheduler.nodeUpdate(nodeManager3); // Node heartbeat
+//
+// assigned = allocator.schedule();
+// checkAssignments(
+// new ContainerRequestEvent[]{event1, event3}, assigned, false);
+//
+// //validate that no container is assigned to h1 as it doesn't have 2048
+// for (TaskAttemptContainerAssignedEvent assig : assigned) {
+// Assert.assertFalse("Assigned count not correct",
+// "h1".equals(assig.getContainer().getNodeId().getHost()));
+// }
+// }
+//
+//
+//
+// private RMNode addNode(FifoScheduler scheduler,
+// String nodeName, int memory) {
+// NodeId nodeId = recordFactory.newRecordInstance(NodeId.class);
+// nodeId.setHost(nodeName);
+// nodeId.setPort(1234);
+// Resource resource = recordFactory.newRecordInstance(Resource.class);
+// resource.setMemory(memory);
+// RMNode nodeManager = new RMNodeImpl(nodeId, null, nodeName, 0, 0,
+// ResourceTrackerService.resolve(nodeName), resource);
+// scheduler.addNode(nodeManager); // Node registration
+// return nodeManager;
+// }
+//
+// private FifoScheduler createScheduler() throws YarnRemoteException {
+// FifoScheduler fsc = new FifoScheduler() {
+// //override this to copy the objects
+// //otherwise FifoScheduler updates the numContainers in same objects as kept by
+// //RMContainerAllocator
+//
+// @Override
+// public synchronized void allocate(ApplicationAttemptId applicationId,
+// List<ResourceRequest> ask) {
+// List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
+// for (ResourceRequest req : ask) {
+// ResourceRequest reqCopy = recordFactory.newRecordInstance(ResourceRequest.class);
+// reqCopy.setPriority(req.getPriority());
+// reqCopy.setHostName(req.getHostName());
+// reqCopy.setCapability(req.getCapability());
+// reqCopy.setNumContainers(req.getNumContainers());
+// askCopy.add(reqCopy);
+// }
+// super.allocate(applicationId, askCopy);
+// }
+// };
+// try {
+// fsc.reinitialize(new Configuration(), new ContainerTokenSecretManager(), null);
+// fsc.addApplication(recordFactory.newRecordInstance(ApplicationId.class),
+// recordFactory.newRecordInstance(ApplicationMaster.class),
+// "test", null, null, StoreFactory.createVoidAppStore());
+// } catch(IOException ie) {
+// LOG.info("add application failed with ", ie);
+// assert(false);
+// }
+// return fsc;
+// }
+//
+// private ContainerRequestEvent createReq(
+// int attemptid, int memory, String[] hosts) {
+// return createReq(attemptid, memory, hosts, false, false);
+// }
+//
+// private ContainerRequestEvent createReq(
+// int attemptid, int memory, String[] hosts, boolean earlierFailedAttempt, boolean reduce) {
+// ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
+// appId.setClusterTimestamp(0);
+// appId.setId(0);
+// JobId jobId = recordFactory.newRecordInstance(JobId.class);
+// jobId.setAppId(appId);
+// jobId.setId(0);
+// TaskId taskId = recordFactory.newRecordInstance(TaskId.class);
+// taskId.setId(0);
+// taskId.setJobId(jobId);
+// if (reduce) {
+// taskId.setTaskType(TaskType.REDUCE);
+// } else {
+// taskId.setTaskType(TaskType.MAP);
+// }
+// TaskAttemptId attemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
+// attemptId.setId(attemptid);
+// attemptId.setTaskId(taskId);
+// Resource containerNeed = recordFactory.newRecordInstance(Resource.class);
+// containerNeed.setMemory(memory);
+// if (earlierFailedAttempt) {
+// return ContainerRequestEvent.
+// createContainerRequestEventForFailedContainer(attemptId, containerNeed);
+// }
+// return new ContainerRequestEvent(attemptId,
+// containerNeed,
+// hosts, new String[] {NetworkTopology.DEFAULT_RACK});
+// }
+//
+// private void checkAssignments(ContainerRequestEvent[] requests,
+// List<TaskAttemptContainerAssignedEvent> assignments,
+// boolean checkHostMatch) {
+// Assert.assertNotNull("Container not assigned", assignments);
+// Assert.assertEquals("Assigned count not correct",
+// requests.length, assignments.size());
+//
+// //check for uniqueness of containerIDs
+// Set<ContainerId> containerIds = new HashSet<ContainerId>();
+// for (TaskAttemptContainerAssignedEvent assigned : assignments) {
+// containerIds.add(assigned.getContainer().getId());
+// }
+// Assert.assertEquals("Assigned containers must be different",
+// assignments.size(), containerIds.size());
+//
+// //check for all assignment
+// for (ContainerRequestEvent req : requests) {
+// TaskAttemptContainerAssignedEvent assigned = null;
+// for (TaskAttemptContainerAssignedEvent ass : assignments) {
+// if (ass.getTaskAttemptID().equals(req.getAttemptID())){
+// assigned = ass;
+// break;
+// }
+// }
+// checkAssignment(req, assigned, checkHostMatch);
+// }
+// }
+//
+// private void checkAssignment(ContainerRequestEvent request,
+// TaskAttemptContainerAssignedEvent assigned, boolean checkHostMatch) {
+// Assert.assertNotNull("Nothing assigned to attempt " + request.getAttemptID(),
+// assigned);
+// Assert.assertEquals("assigned to wrong attempt", request.getAttemptID(),
+// assigned.getTaskAttemptID());
+// if (checkHostMatch) {
+// Assert.assertTrue("Not assigned to requested host", Arrays.asList(
+// request.getHosts()).contains(
+// assigned.getContainer().getNodeId().toString()));
+// }
+//
+// }
+//
+// //Mock RMContainerAllocator
+// //Instead of talking to the remote Scheduler, it uses the local Scheduler
+// public static class LocalRMContainerAllocator extends RMContainerAllocator {
+// private static final List<TaskAttemptContainerAssignedEvent> events =
+// new ArrayList<TaskAttemptContainerAssignedEvent>();
+//
+// public static class AMRMProtocolImpl implements AMRMProtocol {
+//
+// private ResourceScheduler resourceScheduler;
+//
+// public AMRMProtocolImpl(ResourceScheduler resourceScheduler) {
+// this.resourceScheduler = resourceScheduler;
+// }
+//
+// @Override
+// public RegisterApplicationMasterResponse registerApplicationMaster(RegisterApplicationMasterRequest request) throws YarnRemoteException {
+// RegisterApplicationMasterResponse response = recordFactory.newRecordInstance(RegisterApplicationMasterResponse.class);
+// return response;
+// }
+//
+// public AllocateResponse allocate(AllocateRequest request) throws YarnRemoteException {
+// List<ResourceRequest> ask = request.getAskList();
+// List<Container> release = request.getReleaseList();
+// try {
+// AMResponse response = recordFactory.newRecordInstance(AMResponse.class);
+// Allocation allocation = resourceScheduler.allocate(request.getApplicationAttemptId(), ask);
+// response.addAllNewContainers(allocation.getContainers());
+// response.setAvailableResources(allocation.getResourceLimit());
+// AllocateResponse allocateResponse = recordFactory.newRecordInstance(AllocateResponse.class);
+// allocateResponse.setAMResponse(response);
+// return allocateResponse;
+// } catch(IOException ie) {
+// throw RPCUtil.getRemoteException(ie);
+// }
+// }
+//
+// @Override
+// public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws YarnRemoteException {
+// FinishApplicationMasterResponse response = recordFactory.newRecordInstance(FinishApplicationMasterResponse.class);
+// return response;
+// }
+//
+// }
+//
+// private ResourceScheduler scheduler;
+// LocalRMContainerAllocator(ResourceScheduler scheduler, Configuration conf) {
+// super(null, new TestContext(events));
+// this.scheduler = scheduler;
+// super.init(conf);
+// super.start();
+// }
+//
+// protected AMRMProtocol createSchedulerProxy() {
+// return new AMRMProtocolImpl(scheduler);
+// }
+//
+// @Override
+// protected void register() {}
+// @Override
+// protected void unregister() {}
+//
+// @Override
+// protected Resource getMinContainerCapability() {
+// Resource res = recordFactory.newRecordInstance(Resource.class);
+// res.setMemory(1024);
+// return res;
+// }
+//
+// @Override
+// protected Resource getMaxContainerCapability() {
+// Resource res = recordFactory.newRecordInstance(Resource.class);
+// res.setMemory(10240);
+// return res;
+// }
+//
+// public void sendRequest(ContainerRequestEvent req) {
+// sendRequests(Arrays.asList(new ContainerRequestEvent[]{req}));
+// }
+//
+// public void sendRequests(List<ContainerRequestEvent> reqs) {
+// for (ContainerRequestEvent req : reqs) {
+// handle(req);
+// }
+// }
+//
+// //API to be used by tests
+// public List<TaskAttemptContainerAssignedEvent> schedule() {
+// //run the scheduler
+// try {
+// heartbeat();
+// } catch (Exception e) {
+// LOG.error("error in heartbeat ", e);
+// throw new YarnException(e);
+// }
+//
+// List<TaskAttemptContainerAssignedEvent> result = new ArrayList(events);
+// events.clear();
+// return result;
+// }
+//
+// protected void startAllocatorThread() {
+// //override to NOT start thread
+// }
+//
+// static class TestContext implements AppContext {
+// private List<TaskAttemptContainerAssignedEvent> events;
+// TestContext(List<TaskAttemptContainerAssignedEvent> events) {
+// this.events = events;
+// }
+// @Override
+// public Map<JobId, Job> getAllJobs() {
+// return null;
+// }
+// @Override
+// public ApplicationAttemptId getApplicationAttemptId() {
+// return recordFactory.newRecordInstance(ApplicationAttemptId.class);
+// }
+// @Override
+// public ApplicationId getApplicationID() {
+// return recordFactory.newRecordInstance(ApplicationId.class);
+// }
+// @Override
+// public EventHandler getEventHandler() {
+// return new EventHandler() {
+// @Override
+// public void handle(Event event) {
+// events.add((TaskAttemptContainerAssignedEvent) event);
+// }
+// };
+// }
+// @Override
+// public Job getJob(JobId jobID) {
+// return null;
+// }
+//
+// @Override
+// public String getUser() {
+// return null;
+// }
+//
+// @Override
+// public Clock getClock() {
+// return null;
+// }
+//
+// @Override
+// public String getApplicationName() {
+// return null;
+// }
+//
+// @Override
+// public long getStartTime() {
+// return 0;
+// }
+// }
+// }
+//
+// public static void main(String[] args) throws Exception {
+// TestRMContainerAllocator t = new TestRMContainerAllocator();
+// t.testSimple();
+// //t.testResource();
+// t.testMapReduceScheduling();
+// }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
new file mode 100644
index 0000000..448af9b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
@@ -0,0 +1,202 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.util.Iterator;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.junit.Test;
+
+public class TestRecovery {
+
+ private static final Log LOG = LogFactory.getLog(TestRecovery.class);
+
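+ /**
+ * Fails one map attempt, kills its replacement, completes the third,
+ * then stops the AM and restarts it with recovery enabled: the finished
+ * map must be recovered from the previous run's history, keeping its
+ * original start and finish times.
+ */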
+ @Test
+ public void testCrashed() throws Exception {
+ int runCount = 0;
+ MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
+ Configuration conf = new Configuration();
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.RUNNING);
+ long jobStartTime = job.getReport().getStartTime();
+ //all maps would be running
+ Assert.assertEquals("No of tasks not correct",
+ 3, job.getTasks().size());
+ Iterator<Task> it = job.getTasks().values().iterator();
+ Task mapTask1 = it.next();
+ Task mapTask2 = it.next();
+ Task reduceTask = it.next();
+
+ // all maps must be running
+ app.waitForState(mapTask1, TaskState.RUNNING);
+ app.waitForState(mapTask2, TaskState.RUNNING);
+
+ TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
+ TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
+
+ //before sending the TA_DONE event, make sure the attempt has come to
+ //the RUNNING state
+ app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
+ app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
+
+ // the reduce must be in RUNNING state
+ Assert.assertEquals("Reduce Task state not correct",
+ TaskState.RUNNING, reduceTask.getReport().getTaskState());
+
+ //send the fail signal to the 1st map task attempt
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ task1Attempt1.getID(),
+ TaskAttemptEventType.TA_FAILMSG));
+
+ app.waitForState(task1Attempt1, TaskAttemptState.FAILED);
+
+ while (mapTask1.getAttempts().size() != 2) {
+ Thread.sleep(2000);
+ LOG.info("Waiting for next attempt to start");
+ }
+ Iterator<TaskAttempt> itr = mapTask1.getAttempts().values().iterator();
+ itr.next();
+ TaskAttempt task1Attempt2 = itr.next();
+
+ app.waitForState(task1Attempt2, TaskAttemptState.RUNNING);
+
+ //send the kill signal to the 1st map 2nd attempt
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ task1Attempt2.getID(),
+ TaskAttemptEventType.TA_KILL));
+
+ app.waitForState(task1Attempt2, TaskAttemptState.KILLED);
+
+ while (mapTask1.getAttempts().size() != 3) {
+ Thread.sleep(2000);
+ LOG.info("Waiting for next attempt to start");
+ }
+ itr = mapTask1.getAttempts().values().iterator();
+ itr.next();
+ itr.next();
+ TaskAttempt task1Attempt3 = itr.next();
+
+ app.waitForState(task1Attempt3, TaskAttemptState.RUNNING);
+
+ //send the done signal to the 1st map 3rd attempt
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ task1Attempt3.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ //wait for first map task to complete
+ app.waitForState(mapTask1, TaskState.SUCCEEDED);
+ long task1StartTime = mapTask1.getReport().getStartTime();
+ long task1FinishTime = mapTask1.getReport().getFinishTime();
+
+ //stop the app
+ app.stop();
+
+ //rerun
+ //in rerun the 1st map will be recovered from previous run
+ app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
+ conf = new Configuration();
+ conf.setBoolean(AMConstants.RECOVERY_ENABLE, true);
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ job = app.submit(conf);
+ app.waitForState(job, JobState.RUNNING);
+ //all maps would be running
+ Assert.assertEquals("No of tasks not correct",
+ 3, job.getTasks().size());
+ it = job.getTasks().values().iterator();
+ mapTask1 = it.next();
+ mapTask2 = it.next();
+ reduceTask = it.next();
+
+ // first map will be recovered, no need to send done
+ app.waitForState(mapTask1, TaskState.SUCCEEDED);
+
+ app.waitForState(mapTask2, TaskState.RUNNING);
+
+ task2Attempt = mapTask2.getAttempts().values().iterator().next();
+ //before sending the TA_DONE event, make sure the attempt has come to
+ //the RUNNING state
+ app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
+
+ //send the done signal to the 2nd map task
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ mapTask2.getAttempts().values().iterator().next().getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ //wait to get it completed
+ app.waitForState(mapTask2, TaskState.SUCCEEDED);
+
+ //wait for reduce to be running before sending done
+ app.waitForState(reduceTask, TaskState.RUNNING);
+ //send the done signal to the reduce
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ reduceTask.getAttempts().values().iterator().next().getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ app.waitForState(job, JobState.SUCCEEDED);
+ app.verifyCompleted();
+ Assert.assertEquals("Job Start time not correct",
+ jobStartTime, job.getReport().getStartTime());
+ Assert.assertEquals("Task Start time not correct",
+ task1StartTime, mapTask1.getReport().getStartTime());
+ Assert.assertEquals("Task Finish time not correct",
+ task1FinishTime, mapTask1.getReport().getFinishTime());
+ }
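+
+ // A hypothetical polling helper (illustrative only; the test above inlines
+ // this loop twice): block until the task has spawned the expected number
+ // of attempts before inspecting them.
+ private void waitForAttemptCount(Task task, int count)
+ throws InterruptedException {
+ while (task.getAttempts().size() != count) {
+ Thread.sleep(2000);
+ LOG.info("Waiting for next attempt to start");
+ }
+ }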
+
+ class MRAppWithHistory extends MRApp {
+ public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
+ String testName, boolean cleanOnStart, int startCount) {
+ super(maps, reduces, autoComplete, testName, cleanOnStart, startCount);
+ }
+
+ @Override
+ protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
+ AppContext context) {
+ JobHistoryEventHandler eventHandler = new JobHistoryEventHandler(context,
+ getStartCount());
+ return eventHandler;
+ }
+ }
+
+ public static void main(String[] arg) throws Exception {
+ TestRecovery test = new TestRecovery();
+ test.testCrashed();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
new file mode 100644
index 0000000..a359fd3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
@@ -0,0 +1,779 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator;
+import org.apache.hadoop.mapreduce.v2.app.speculate.ExponentiallySmoothedTaskRuntimeEstimator;
+import org.apache.hadoop.mapreduce.v2.app.speculate.LegacyTaskRuntimeEstimator;
+import org.apache.hadoop.mapreduce.v2.app.speculate.Speculator;
+import org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.speculate.TaskRuntimeEstimator;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.SystemClock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.service.CompositeService;
+import org.junit.Assert;
+import org.junit.Test;
+
+
+public class TestRuntimeEstimators {
+
+ private static int INITIAL_NUMBER_FREE_SLOTS = 600;
+ private static int MAP_SLOT_REQUIREMENT = 3;
+ // this has to be at least as much as map slot requirement
+ private static int REDUCE_SLOT_REQUIREMENT = 4;
+ private static int MAP_TASKS = 200;
+ private static int REDUCE_TASKS = 150;
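+ // With 600 free slots, all 200 maps (3 slots each) or all 150 reduces
+ // (4 slots each) can run concurrently.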
+
+ MockClock clock;
+
+ Job myJob;
+
+ AppContext myAppContext;
+
+ private static final Log LOG = LogFactory.getLog(TestRuntimeEstimators.class);
+
+ private final AtomicInteger slotsInUse = new AtomicInteger(0);
+
+ AsyncDispatcher dispatcher;
+
+ DefaultSpeculator speculator;
+
+ TaskRuntimeEstimator estimator;
+
+ // This is a huge kludge. The real implementations have a decent approach
+ // to tracking completed task counts; here we just bump global counters.
+ private final AtomicInteger completedMaps = new AtomicInteger(0);
+ private final AtomicInteger completedReduces = new AtomicInteger(0);
+
+ private final AtomicInteger successfulSpeculations
+ = new AtomicInteger(0);
+ private final AtomicLong taskTimeSavedBySpeculation
+ = new AtomicLong(0L);
+
+ private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
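+ // Runs one complete simulated job against the given estimator and asserts
+ // that exactly the expected number of speculative attempts win their races.
+ // The loop below stands in for the scheduler.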
+ private void coreTestEstimator
+ (TaskRuntimeEstimator testedEstimator, int expectedSpeculations) {
+ estimator = testedEstimator;
+ clock = new MockClock();
+ dispatcher = new AsyncDispatcher();
+ myJob = null;
+ slotsInUse.set(0);
+ completedMaps.set(0);
+ completedReduces.set(0);
+ successfulSpeculations.set(0);
+ taskTimeSavedBySpeculation.set(0);
+
+ clock.advanceTime(1000);
+
+ Configuration conf = new Configuration();
+
+ myAppContext = new MyAppContext(MAP_TASKS, REDUCE_TASKS);
+ myJob = myAppContext.getAllJobs().values().iterator().next();
+
+ estimator.contextualize(conf, myAppContext);
+
+ speculator = new DefaultSpeculator(conf, myAppContext, estimator, clock);
+
+ dispatcher.register(Speculator.EventType.class, speculator);
+
+ dispatcher.register(TaskEventType.class, new SpeculationRequestEventHandler());
+
+ dispatcher.init(conf);
+ dispatcher.start();
+
+ speculator.init(conf);
+ speculator.start();
+
+ // Now that the plumbing is hooked up, we repeat the following until all
+ // tasks are finished:
+ // 1: If we have spare capacity, assign as many map tasks as we can, then
+ // assign as many reduce tasks as we can. Note that an odd reduce
+ // task might be started while there are still map tasks, because
+ // map tasks take 3 slots and reduce tasks take 4.
+ // 2: Send a speculation event for every task attempt that's running;
+ // note that new attempts might get started by the speculator.
+
+ // discover undone tasks
+ int undoneMaps = MAP_TASKS;
+ int undoneReduces = REDUCE_TASKS;
+
+ // build a task sequence where all the maps precede any of the reduces
+ List<Task> allTasksSequence = new LinkedList<Task>();
+
+ allTasksSequence.addAll(myJob.getTasks(TaskType.MAP).values());
+ allTasksSequence.addAll(myJob.getTasks(TaskType.REDUCE).values());
+
+ while (undoneMaps + undoneReduces > 0) {
+ undoneMaps = 0; undoneReduces = 0;
+ // start every attempt that is NEW and for which there are enough free slots
+ for (Task task : allTasksSequence) {
+ if (!task.isFinished()) {
+ if (task.getType() == TaskType.MAP) {
+ ++undoneMaps;
+ } else {
+ ++undoneReduces;
+ }
+ }
+ for (TaskAttempt attempt : task.getAttempts().values()) {
+ if (attempt.getState() == TaskAttemptState.NEW
+ && INITIAL_NUMBER_FREE_SLOTS - slotsInUse.get()
+ >= taskTypeSlots(task.getType())) {
+ MyTaskAttemptImpl attemptImpl = (MyTaskAttemptImpl)attempt;
+ SpeculatorEvent event
+ = new SpeculatorEvent(attempt.getID(), false, clock.getTime());
+ speculator.handle(event);
+ attemptImpl.startUp();
+ } else {
+ // If a task attempt is in progress, report its latest status to
+ // the Speculator.
+ TaskAttemptStatus status = new TaskAttemptStatus();
+ status.id = attempt.getID();
+ status.progress = attempt.getProgress();
+ status.stateString = attempt.getState().name();
+ status.taskState = attempt.getState();
+ SpeculatorEvent event = new SpeculatorEvent(status, clock.getTime());
+ speculator.handle(event);
+ }
+ }
+ }
+
+ long startTime = System.currentTimeMillis();
+
+ // drain the speculator event queue
+ while (!speculator.eventQueueEmpty()) {
+ Thread.yield();
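+ // Safety valve: bail out if the queue fails to drain within ~130 seconds
+ // of wall-clock time; note that this skips the final assertion below.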
+ if (System.currentTimeMillis() > startTime + 130000) {
+ return;
+ }
+ }
+
+ clock.advanceTime(1000L);
+
+ if (clock.getTime() % 10000L == 0L) {
+ speculator.scanForSpeculations();
+ }
+ }
+
+ Assert.assertEquals("We got the wrong number of successful speculations.",
+ expectedSpeculations, successfulSpeculations.get());
+ }
+
+ @Test
+ public void testLegacyEstimator() throws Exception {
+ TaskRuntimeEstimator specificEstimator = new LegacyTaskRuntimeEstimator();
+ coreTestEstimator(specificEstimator, 3);
+ }
+
+ @Test
+ public void testExponentialEstimator() throws Exception {
+ TaskRuntimeEstimator specificEstimator
+ = new ExponentiallySmoothedTaskRuntimeEstimator();
+ coreTestEstimator(specificEstimator, 3);
+ }
+
+ int taskTypeSlots(TaskType type) {
+ return type == TaskType.MAP ? MAP_SLOT_REQUIREMENT : REDUCE_SLOT_REQUIREMENT;
+ }
+
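+ // Receives T_ADD_SPEC_ATTEMPT events from the speculator and reacts by
+ // adding a fresh attempt to the targeted task, as a real Job would.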
+ class SpeculationRequestEventHandler implements EventHandler<TaskEvent> {
+
+ @Override
+ public void handle(TaskEvent event) {
+ TaskId taskID = event.getTaskID();
+ Task task = myJob.getTask(taskID);
+
+ Assert.assertEquals
+ ("Wrong type event", TaskEventType.T_ADD_SPEC_ATTEMPT, event.getType());
+
+ System.out.println("SpeculationRequestEventHandler.handle adds a speculation task for " + taskID);
+
+ addAttempt(task);
+ }
+ }
+
+ void addAttempt(Task task) {
+ MyTaskImpl myTask = (MyTaskImpl) task;
+
+ myTask.addAttempt();
+ }
+
+ class MyTaskImpl implements Task {
+ private final TaskId taskID;
+ private final Map<TaskAttemptId, TaskAttempt> attempts
+ = new ConcurrentHashMap<TaskAttemptId, TaskAttempt>(4);
+
+ MyTaskImpl(JobId jobID, int index, TaskType type) {
+ taskID = recordFactory.newRecordInstance(TaskId.class);
+ taskID.setId(index);
+ taskID.setTaskType(type);
+ taskID.setJobId(jobID);
+ }
+
+ void addAttempt() {
+ TaskAttempt taskAttempt
+ = new MyTaskAttemptImpl(taskID, attempts.size(), clock);
+ TaskAttemptId taskAttemptID = taskAttempt.getID();
+
+ attempts.put(taskAttemptID, taskAttempt);
+
+ System.out.println("TLTRE.MyTaskImpl.addAttempt " + getID());
+
+ SpeculatorEvent event = new SpeculatorEvent(taskID, +1);
+ dispatcher.getEventHandler().handle(event);
+ }
+
+ @Override
+ public TaskId getID() {
+ return taskID;
+ }
+
+ @Override
+ public TaskReport getReport() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public Counters getCounters() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public float getProgress() {
+ float result = 0.0F;
+
+ for (TaskAttempt attempt : attempts.values()) {
+ result = Math.max(result, attempt.getProgress());
+ }
+
+ return result;
+ }
+
+ @Override
+ public TaskType getType() {
+ return taskID.getTaskType();
+ }
+
+ @Override
+ public Map<TaskAttemptId, TaskAttempt> getAttempts() {
+ Map<TaskAttemptId, TaskAttempt> result
+ = new HashMap<TaskAttemptId, TaskAttempt>(attempts.size());
+ result.putAll(attempts);
+ return result;
+ }
+
+ @Override
+ public TaskAttempt getAttempt(TaskAttemptId attemptID) {
+ return attempts.get(attemptID);
+ }
+
+ @Override
+ public boolean isFinished() {
+ for (TaskAttempt attempt : attempts.values()) {
+ if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ @Override
+ public boolean canCommit(TaskAttemptId taskAttemptID) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public TaskState getState() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ }
+
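+ // A minimal Job: fixed map and reduce task sets, each task seeded with a
+ // single attempt; methods the test never needs throw
+ // UnsupportedOperationException.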
+ class MyJobImpl implements Job {
+ private final JobId jobID;
+ private final Map<TaskId, Task> allTasks = new HashMap<TaskId, Task>();
+ private final Map<TaskId, Task> mapTasks = new HashMap<TaskId, Task>();
+ private final Map<TaskId, Task> reduceTasks = new HashMap<TaskId, Task>();
+
+ MyJobImpl(JobId jobID, int numMaps, int numReduces) {
+ this.jobID = jobID;
+ for (int i = 0; i < numMaps; ++i) {
+ Task newTask = new MyTaskImpl(jobID, i, TaskType.MAP);
+ mapTasks.put(newTask.getID(), newTask);
+ allTasks.put(newTask.getID(), newTask);
+ }
+ for (int i = 0; i < numReduces; ++i) {
+ Task newTask = new MyTaskImpl(jobID, i, TaskType.REDUCE);
+ reduceTasks.put(newTask.getID(), newTask);
+ allTasks.put(newTask.getID(), newTask);
+ }
+
+ // give every task an attempt
+ for (Task task : allTasks.values()) {
+ MyTaskImpl myTaskImpl = (MyTaskImpl) task;
+ myTaskImpl.addAttempt();
+ }
+ }
+
+ @Override
+ public JobId getID() {
+ return jobID;
+ }
+
+ @Override
+ public JobState getState() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public JobReport getReport() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public Counters getCounters() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public Map<TaskId, Task> getTasks() {
+ return allTasks;
+ }
+
+ @Override
+ public Map<TaskId, Task> getTasks(TaskType taskType) {
+ return taskType == TaskType.MAP ? mapTasks : reduceTasks;
+ }
+
+ @Override
+ public Task getTask(TaskId taskID) {
+ return allTasks.get(taskID);
+ }
+
+ @Override
+ public List<String> getDiagnostics() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public int getCompletedMaps() {
+ return completedMaps.get();
+ }
+
+ @Override
+ public int getCompletedReduces() {
+ return completedReduces.get();
+ }
+
+ @Override
+ public TaskAttemptCompletionEvent[]
+ getTaskAttemptCompletionEvents(int fromEventId, int maxEvents) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public String getName() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public int getTotalMaps() {
+ return mapTasks.size();
+ }
+
+ @Override
+ public int getTotalReduces() {
+ return reduceTasks.size();
+ }
+
+ @Override
+ public boolean isUber() {
+ return false;
+ }
+
+ @Override
+ public boolean checkAccess(UserGroupInformation callerUGI,
+ JobACL jobOperation) {
+ return true;
+ }
+ }
+
+ /*
+ * We follow the pattern of the real XxxImpl classes: we create a job and
+ * initialize it with a full suite of tasks, each of which has one attempt
+ * in the NEW state. Attempts transition only from NEW to RUNNING to
+ * SUCCEEDED.
+ */
+ class MyTaskAttemptImpl implements TaskAttempt {
+ private final TaskAttemptId myAttemptID;
+
+ long startMockTime = Long.MIN_VALUE;
+
+ long shuffleCompletedTime = Long.MAX_VALUE;
+
+ TaskAttemptState overridingState = TaskAttemptState.NEW;
+
+ MyTaskAttemptImpl(TaskId taskID, int index, Clock clock) {
+ myAttemptID = recordFactory.newRecordInstance(TaskAttemptId.class);
+ myAttemptID.setId(index);
+ myAttemptID.setTaskId(taskID);
+ }
+
+ void startUp() {
+ startMockTime = clock.getTime();
+ overridingState = null;
+
+ slotsInUse.addAndGet(taskTypeSlots(myAttemptID.getTaskId().getTaskType()));
+
+ System.out.println("TLTRE.MyTaskAttemptImpl.startUp starting " + getID());
+
+ SpeculatorEvent event = new SpeculatorEvent(getID().getTaskId(), -1);
+ dispatcher.getEventHandler().handle(event);
+ }
+
+ @Override
+ public TaskAttemptId getID() {
+ return myAttemptID;
+ }
+
+ @Override
+ public TaskAttemptReport getReport() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public List<String> getDiagnostics() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public Counters getCounters() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
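+ // Deterministic runtime model keyed off the task index: most tasks take
+ // 200 simulated seconds, some run faster or slower, and every fortieth
+ // task's first attempt is a 600-second straggler worth speculating on.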
+ private float getCodeRuntime() {
+ int taskIndex = myAttemptID.getTaskId().getId();
+ int attemptIndex = myAttemptID.getId();
+
+ float result = 200.0F;
+
+ switch (taskIndex % 4) {
+ case 0:
+ if (taskIndex % 40 == 0 && attemptIndex == 0) {
+ result = 600.0F;
+ }
+ break;
+
+ case 1:
+ result = 150.0F;
+ break;
+
+ case 2:
+ break;
+
+ case 3:
+ result = 250.0F;
+ break;
+ }
+
+ return result;
+ }
+
+ private float getMapProgress() {
+ float runtime = getCodeRuntime();
+
+ return Math.min
+ ((float) (clock.getTime() - startMockTime) / (runtime * 1000.0F), 1.0F);
+ }
+
+ private float getReduceProgress() {
+ Job job = myAppContext.getJob(myAttemptID.getTaskId().getJobId());
+ float runtime = getCodeRuntime();
+
+ Collection<Task> allMapTasks = job.getTasks(TaskType.MAP).values();
+
+ int numberMaps = allMapTasks.size();
+ int numberDoneMaps = 0;
+
+ for (Task mapTask : allMapTasks) {
+ if (mapTask.isFinished()) {
+ ++numberDoneMaps;
+ }
+ }
+
+ if (numberMaps == numberDoneMaps) {
+ shuffleCompletedTime = Math.min(shuffleCompletedTime, clock.getTime());
+
+ return Math.min
+ ((float) (clock.getTime() - shuffleCompletedTime)
+ / (runtime * 2000.0F) + 0.5F,
+ 1.0F);
+ } else {
+ return ((float) numberDoneMaps) / numberMaps * 0.5F;
+ }
+ }
+
+ // Progress is computed from elapsed mock time and the runtime model above.
+ @Override
+ public float getProgress() {
+ if (overridingState == TaskAttemptState.NEW) {
+ return 0.0F;
+ }
+ return myAttemptID.getTaskId().getTaskType() == TaskType.MAP ? getMapProgress() : getReduceProgress();
+ }
+
+ @Override
+ public TaskAttemptState getState() {
+ if (overridingState != null) {
+ return overridingState;
+ }
+ TaskAttemptState result
+ = getProgress() < 1.0F ? TaskAttemptState.RUNNING : TaskAttemptState.SUCCEEDED;
+
+ if (result == TaskAttemptState.SUCCEEDED) {
+ overridingState = TaskAttemptState.SUCCEEDED;
+
+ System.out.println("MyTaskAttemptImpl.getState() -- attempt " + myAttemptID + " finished.");
+
+ slotsInUse.addAndGet(- taskTypeSlots(myAttemptID.getTaskId().getTaskType()));
+
+ (myAttemptID.getTaskId().getTaskType() == TaskType.MAP
+ ? completedMaps : completedReduces).getAndIncrement();
+
+ // check for a spectacularly successful speculation
+ TaskId taskID = myAttemptID.getTaskId();
+
+ Task task = myJob.getTask(taskID);
+
+ for (TaskAttempt otherAttempt : task.getAttempts().values()) {
+ if (otherAttempt != this
+ && otherAttempt.getState() == TaskAttemptState.RUNNING) {
+ // we had two instances running. Try to determine how much
+ // we might have saved by speculation
+ if (getID().getId() > otherAttempt.getID().getId()) {
+ // the speculation won
+ successfulSpeculations.getAndIncrement();
+ float hisProgress = otherAttempt.getProgress();
+ long hisStartTime = ((MyTaskAttemptImpl)otherAttempt).startMockTime;
+ System.out.println("TLTRE:A speculation finished at time "
+ + clock.getTime()
+ + ". The stalled attempt is at " + (hisProgress * 100.0)
+ + "% progress, and it started at "
+ + hisStartTime + ", which is "
+ + (clock.getTime() - hisStartTime) + " ago.");
+ long originalTaskEndEstimate
+ = (hisStartTime
+ + estimator.estimatedRuntime(otherAttempt.getID()));
+ System.out.println(
+ "TLTRE: We would have expected the original attempt to take "
+ + estimator.estimatedRuntime(otherAttempt.getID())
+ + ", finishing at " + originalTaskEndEstimate);
+ long estimatedSavings = originalTaskEndEstimate - clock.getTime();
+ taskTimeSavedBySpeculation.addAndGet(estimatedSavings);
+ System.out.println("TLTRE: The task is " + task.getID());
+ slotsInUse.addAndGet(- taskTypeSlots(myAttemptID.getTaskId().getTaskType()));
+ ((MyTaskAttemptImpl)otherAttempt).overridingState
+ = TaskAttemptState.KILLED;
+ } else {
+ System.out.println(
+ "TLTRE: The normal attempt beat the speculation in "
+ + task.getID());
+ }
+ }
+ }
+ }
+
+ return result;
+ }
+
+ @Override
+ public boolean isFinished() {
+ return getProgress() == 1.0F;
+ }
+
+ @Override
+ public ContainerId getAssignedContainerID() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public String getNodeHttpAddress() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public long getLaunchTime() {
+ return startMockTime;
+ }
+
+ @Override
+ public long getFinishTime() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public String getAssignedContainerMgrAddress() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ }
+
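+ /** A manually advanced clock, so the test fully controls simulated time. */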
+ static class MockClock implements Clock {
+ private long currentTime = 0;
+
+ public long getTime() {
+ return currentTime;
+ }
+
+ void setMeasuredTime(long newTime) {
+ currentTime = newTime;
+ }
+
+ void advanceTime(long increment) {
+ currentTime += increment;
+ }
+ }
+
+ class MyAppMaster extends CompositeService {
+ final Clock clock;
+ public MyAppMaster(Clock clock) {
+ super(MyAppMaster.class.getName());
+ if (clock == null) {
+ clock = new SystemClock();
+ }
+ this.clock = clock;
+ LOG.info("Created MyAppMaster");
+ }
+ }
+
+ class MyAppContext implements AppContext {
+ // We construct the record objects by hand here. Please don't do that very
+ // often.
+
+ private final ApplicationAttemptId myAppAttemptID;
+ private final ApplicationId myApplicationID;
+ private final JobId myJobID;
+ private final Map<JobId, Job> allJobs;
+
+ MyAppContext(int numberMaps, int numberReduces) {
+ myApplicationID = recordFactory.newRecordInstance(ApplicationId.class);
+ myApplicationID.setClusterTimestamp(clock.getTime());
+ myApplicationID.setId(1);
+
+ myAppAttemptID = recordFactory
+ .newRecordInstance(ApplicationAttemptId.class);
+ myAppAttemptID.setApplicationId(myApplicationID);
+ myAppAttemptID.setAttemptId(0);
+ myJobID = recordFactory.newRecordInstance(JobId.class);
+ myJobID.setAppId(myApplicationID);
+
+ Job myJob
+ = new MyJobImpl(myJobID, numberMaps, numberReduces);
+
+ allJobs = Collections.singletonMap(myJobID, myJob);
+ }
+
+ @Override
+ public ApplicationAttemptId getApplicationAttemptId() {
+ return myAppAttemptID;
+ }
+
+ @Override
+ public ApplicationId getApplicationID() {
+ return myApplicationID;
+ }
+
+ @Override
+ public Job getJob(JobId jobID) {
+ return allJobs.get(jobID);
+ }
+
+ @Override
+ public Map<JobId, Job> getAllJobs() {
+ return allJobs;
+ }
+
+ @Override
+ public EventHandler getEventHandler() {
+ return dispatcher.getEventHandler();
+ }
+
+ @Override
+ public CharSequence getUser() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public Clock getClock() {
+ return clock;
+ }
+
+ @Override
+ public String getApplicationName() {
+ return null;
+ }
+
+ @Override
+ public long getStartTime() {
+ return 0;
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
new file mode 100644
index 0000000..9bc18f9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
@@ -0,0 +1,136 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.impl;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.JobNoTasksCompletedTransition;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+
+/**
+ * Tests various functions of the JobImpl class
+ */
+public class TestJobImpl {
+
+ @Test
+ public void testJobNoTasksTransition() {
+ JobNoTasksCompletedTransition trans = new JobNoTasksCompletedTransition();
+ JobImpl mockJob = mock(JobImpl.class);
+
+ // Force checkJobCompleteSuccess to return null
+ Task mockTask = mock(Task.class);
+ Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
+ tasks.put(mockTask.getID(), mockTask);
+ when(mockJob.getTasks()).thenReturn(tasks);
+
+ when(mockJob.getState()).thenReturn(JobState.ERROR);
+ JobEvent mockJobEvent = mock(JobEvent.class);
+ JobState state = trans.transition(mockJob, mockJobEvent);
+ Assert.assertEquals("Incorrect state returned from JobNoTasksCompletedTransition",
+ JobState.ERROR, state);
+ }
+
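+ // Happy path: with the committer, event handler, and job context stubbed,
+ // checkJobCompleteSuccess should drive the job to SUCCEEDED.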
+ @Test
+ public void testCheckJobCompleteSuccess() {
+
+ JobImpl mockJob = mock(JobImpl.class);
+ OutputCommitter mockCommitter = mock(OutputCommitter.class);
+ EventHandler mockEventHandler = mock(EventHandler.class);
+ JobContext mockJobContext = mock(JobContext.class);
+
+ when(mockJob.getCommitter()).thenReturn(mockCommitter);
+ when(mockJob.getEventHandler()).thenReturn(mockEventHandler);
+ when(mockJob.getJobContext()).thenReturn(mockJobContext);
+ doNothing().when(mockJob).setFinishTime();
+ doNothing().when(mockJob).logJobHistoryFinishedEvent();
+ when(mockJob.finished(any(JobState.class))).thenReturn(JobState.SUCCEEDED);
+
+ try {
+ doNothing().when(mockCommitter).commitJob(any(JobContext.class));
+ } catch (IOException e) {
+ // commitJob stubbed out, so this can't happen
+ }
+ doNothing().when(mockEventHandler).handle(any(JobHistoryEvent.class));
+ Assert.assertNotNull("checkJobCompleteSuccess incorrectly returns null " +
+ "for successful job",
+ JobImpl.checkJobCompleteSuccess(mockJob));
+ Assert.assertEquals("checkJobCompleteSuccess returns incorrect state",
+ JobState.SUCCEEDED, JobImpl.checkJobCompleteSuccess(mockJob));
+ }
+
+ @Test
+ public void testCheckJobCompleteSuccessFailed() {
+ JobImpl mockJob = mock(JobImpl.class);
+
+ // Make completedTasks differ from getTasks() so the job does not count
+ // as complete
+ Task mockTask = mock(Task.class);
+ Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
+ tasks.put(mockTask.getID(), mockTask);
+ when(mockJob.getTasks()).thenReturn(tasks);
+
+ try {
+ // Just in case the code breaks and reaches these calls
+ OutputCommitter mockCommitter = mock(OutputCommitter.class);
+ EventHandler mockEventHandler = mock(EventHandler.class);
+ doNothing().when(mockCommitter).commitJob(any(JobContext.class));
+ doNothing().when(mockEventHandler).handle(any(JobHistoryEvent.class));
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ Assert.assertNull("checkJobCompleteSuccess incorrectly returns not-null " +
+ "for unsuccessful job",
+ JobImpl.checkJobCompleteSuccess(mockJob));
+ }
+
+
+ public static void main(String[] args) throws Exception {
+ TestJobImpl t = new TestJobImpl();
+ t.testJobNoTasksTransition();
+ t.testCheckJobCompleteSuccess();
+ t.testCheckJobCompleteSuccessFailed();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java
new file mode 100644
index 0000000..8bfc2a8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.metrics;
+
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+
+import static org.apache.hadoop.test.MetricsAsserts.*;
+import static org.apache.hadoop.test.MockitoMaker.*;
+
+import org.junit.Test;
+
+import static org.mockito.Mockito.*;
+
+public class TestMRAppMetrics {
+
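+ // Replays a scripted sequence of job/task life-cycle callbacks and then
+ // checks every counter and gauge the metrics source should expose.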
+ @Test public void testNames() {
+ Job job = mock(Job.class);
+ Task mapTask = make(stub(Task.class).returning(TaskType.MAP).
+ from.getType());
+ Task reduceTask = make(stub(Task.class).returning(TaskType.REDUCE).
+ from.getType());
+ MRAppMetrics metrics = MRAppMetrics.create();
+
+ metrics.submittedJob(job);
+ metrics.waitingTask(mapTask);
+ metrics.waitingTask(reduceTask);
+ metrics.preparingJob(job);
+ metrics.submittedJob(job);
+ metrics.waitingTask(mapTask);
+ metrics.waitingTask(reduceTask);
+ metrics.preparingJob(job);
+ metrics.submittedJob(job);
+ metrics.waitingTask(mapTask);
+ metrics.waitingTask(reduceTask);
+ metrics.preparingJob(job);
+ metrics.endPreparingJob(job);
+ metrics.endPreparingJob(job);
+ metrics.endPreparingJob(job);
+
+ metrics.runningJob(job);
+ metrics.launchedTask(mapTask);
+ metrics.runningTask(mapTask);
+ metrics.failedTask(mapTask);
+ metrics.endWaitingTask(reduceTask);
+ metrics.endRunningTask(mapTask);
+ metrics.endRunningJob(job);
+ metrics.failedJob(job);
+
+ metrics.runningJob(job);
+ metrics.launchedTask(mapTask);
+ metrics.runningTask(mapTask);
+ metrics.killedTask(mapTask);
+ metrics.endWaitingTask(reduceTask);
+ metrics.endRunningTask(mapTask);
+ metrics.endRunningJob(job);
+ metrics.killedJob(job);
+
+ metrics.runningJob(job);
+ metrics.launchedTask(mapTask);
+ metrics.runningTask(mapTask);
+ metrics.completedTask(mapTask);
+ metrics.endRunningTask(mapTask);
+ metrics.launchedTask(reduceTask);
+ metrics.runningTask(reduceTask);
+ metrics.completedTask(reduceTask);
+ metrics.endRunningTask(reduceTask);
+ metrics.endRunningJob(job);
+ metrics.completedJob(job);
+
+ checkMetrics(/*job*/3, 1, 1, 1, 0, 0,
+ /*map*/3, 1, 1, 1, 0, 0,
+ /*reduce*/1, 1, 0, 0, 0, 0);
+ }
+
+ private void checkMetrics(int jobsSubmitted, int jobsCompleted,
+ int jobsFailed, int jobsKilled, int jobsPreparing, int jobsRunning,
+ int mapsLaunched, int mapsCompleted, int mapsFailed, int mapsKilled,
+ int mapsRunning, int mapsWaiting, int reducesLaunched,
+ int reducesCompleted, int reducesFailed, int reducesKilled,
+ int reducesRunning, int reducesWaiting) {
+ MetricsRecordBuilder rb = getMetrics("MRAppMetrics");
+ assertCounter("JobsSubmitted", jobsSubmitted, rb);
+ assertCounter("JobsCompleted", jobsCompleted, rb);
+ assertCounter("JobsFailed", jobsFailed, rb);
+ assertCounter("JobsKilled", jobsKilled, rb);
+ assertGauge("JobsPreparing", jobsPreparing, rb);
+ assertGauge("JobsRunning", jobsRunning, rb);
+
+ assertCounter("MapsLaunched", mapsLaunched, rb);
+ assertCounter("MapsCompleted", mapsCompleted, rb);
+ assertCounter("MapsFailed", mapsFailed, rb);
+ assertCounter("MapsKilled", mapsKilled, rb);
+ assertGauge("MapsRunning", mapsRunning, rb);
+ assertGauge("MapsWaiting", mapsWaiting, rb);
+
+ assertCounter("ReducesLaunched", reducesLaunched, rb);
+ assertCounter("ReducesCompleted", reducesCompleted, rb);
+ assertCounter("ReducesFailed", reducesFailed, rb);
+ assertCounter("ReducesKilled", reducesKilled, rb);
+ assertGauge("ReducesRunning", reducesRunning, rb);
+ assertGauge("ReducesWaiting", reducesWaiting, rb);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/TestDataStatistics.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/TestDataStatistics.java
new file mode 100644
index 0000000..d5b817c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/TestDataStatistics.java
@@ -0,0 +1,73 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestDataStatistics {
+
+ private static final double TOL = 0.001;
+
+ @Test
+ public void testEmptyDataStatistics() throws Exception {
+ DataStatistics statistics = new DataStatistics();
+ Assert.assertEquals(0, statistics.count(), TOL);
+ Assert.assertEquals(0, statistics.mean(), TOL);
+ Assert.assertEquals(0, statistics.var(), TOL);
+ Assert.assertEquals(0, statistics.std(), TOL);
+ Assert.assertEquals(0, statistics.outlier(1.0f), TOL);
+ }
+
+ @Test
+ public void testSingleEntryDataStatistics() throws Exception {
+ DataStatistics statistics = new DataStatistics(17.29);
+ Assert.assertEquals(1, statistics.count(), TOL);
+ Assert.assertEquals(17.29, statistics.mean(), TOL);
+ Assert.assertEquals(0, statistics.var(), TOL);
+ Assert.assertEquals(0, statistics.std(), TOL);
+ Assert.assertEquals(17.29, statistics.outlier(1.0f), TOL);
+ }
+
+ @Test
+ public void testMultiEntryDataStatistics() throws Exception {
+ DataStatistics statistics = new DataStatistics();
+ statistics.add(17);
+ statistics.add(29);
+ Assert.assertEquals(2, statistics.count(), TOL);
+ Assert.assertEquals(23.0, statistics.mean(), TOL);
+ Assert.assertEquals(36.0, statistics.var(), TOL);
+ Assert.assertEquals(6.0, statistics.std(), TOL);
+ Assert.assertEquals(29.0, statistics.outlier(1.0f), TOL);
+ }
+
+ @Test
+ public void testUpdateStatistics() throws Exception {
+ DataStatistics statistics = new DataStatistics(17);
+ statistics.add(29);
+ Assert.assertEquals(2, statistics.count(), TOL);
+ Assert.assertEquals(23.0, statistics.mean(), TOL);
+ Assert.assertEquals(36.0, statistics.var(), TOL);
+
+ statistics.updateStatistics(17, 29);
+ Assert.assertEquals(2, statistics.count(), TOL);
+ Assert.assertEquals(29.0, statistics.mean(), TOL);
+ Assert.assertEquals(0.0, statistics.var(), TOL);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java
new file mode 100644
index 0000000..474e39d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java
@@ -0,0 +1,136 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.APP_ID;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MockJobs;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.util.Apps;
+import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.junit.Test;
+
+import com.google.inject.Injector;
+
+public class TestAMWebApp {
+
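+ // A canned AppContext backed by MockJobs, so the web pages render without
+ // a live application master behind them.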
+ static class TestAppContext implements AppContext {
+ final ApplicationAttemptId appAttemptID;
+ final ApplicationId appID;
+ final String user = MockJobs.newUserName();
+ final Map<JobId, Job> jobs;
+ final long startTime = System.currentTimeMillis();
+
+ TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
+ appID = MockJobs.newAppID(appid);
+ appAttemptID = MockJobs.newAppAttemptID(appID, 0);
+ jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts);
+ }
+
+ TestAppContext() {
+ this(0, 1, 1, 1);
+ }
+
+ @Override
+ public ApplicationAttemptId getApplicationAttemptId() {
+ return appAttemptID;
+ }
+
+ @Override
+ public ApplicationId getApplicationID() {
+ return appID;
+ }
+
+ @Override
+ public CharSequence getUser() {
+ return user;
+ }
+
+ @Override
+ public Job getJob(JobId jobID) {
+ return jobs.get(jobID);
+ }
+
+ @Override
+ public Map<JobId, Job> getAllJobs() {
+ return jobs; // OK
+ }
+
+ @Override
+ public EventHandler getEventHandler() {
+ return null;
+ }
+
+ @Override
+ public Clock getClock() {
+ return null;
+ }
+
+ @Override
+ public String getApplicationName() {
+ return "TestApp";
+ }
+
+ @Override
+ public long getStartTime() {
+ return startTime;
+ }
+ }
+
+ @Test public void testAppControllerIndex() {
+ TestAppContext ctx = new TestAppContext();
+ Injector injector = WebAppTests.createMockInjector(AppContext.class, ctx);
+ AppController controller = injector.getInstance(AppController.class);
+ controller.index();
+ assertEquals(Apps.toString(ctx.appID), controller.get(APP_ID,""));
+ }
+
+ @Test public void testAppView() {
+ WebAppTests.testPage(AppView.class, AppContext.class, new TestAppContext());
+ }
+
+ @Test public void testJobView() {
+ WebAppTests.testPage(JobPage.class, AppContext.class, new TestAppContext());
+ }
+
+ @Test public void testTasksView() {
+ WebAppTests.testPage(TasksPage.class, AppContext.class,
+ new TestAppContext());
+ }
+
+ @Test public void testTaskView() {
+ WebAppTests.testPage(TaskPage.class, AppContext.class,
+ new TestAppContext());
+ }
+
+ public static void main(String[] args) {
+ WebApps.$for("yarn", AppContext.class, new TestAppContext(0, 8, 88, 4)).
+ at(58888).inDevMode().start(new AMWebApp()).joinThread();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/pom.xml
new file mode 100644
index 0000000..5ffc8dc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/pom.xml
@@ -0,0 +1,94 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-mapreduce-client</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${hadoop-mapreduce.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-common</artifactId>
+ <name>hadoop-mapreduce-client-common</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <mr.basedir>${project.parent.parent.basedir}</mr.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-core</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-protobuf-generated-sources-directory</id>
+ <phase>initialize</phase>
+ <configuration>
+ <target>
+ <mkdir dir="target/generated-sources/proto" />
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
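+ <!-- Run protoc over the MR .proto files, emitting Java sources into
+ target/generated-sources/proto (created by the antrun step above). -->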
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>generate-sources</id>
+ <phase>generate-sources</phase>
+ <configuration>
+ <executable>protoc</executable>
+ <arguments>
+ <argument>-I../../hadoop-yarn/hadoop-yarn-api/src/main/proto/</argument>
+ <argument>-Isrc/main/proto/</argument>
+ <argument>--java_out=target/generated-sources/proto</argument>
+ <argument>src/main/proto/mr_protos.proto</argument>
+ <argument>src/main/proto/mr_service_protos.proto</argument>
+ <argument>src/main/proto/MRClientProtocol.proto</argument>
+ </arguments>
+ </configuration>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>target/generated-sources/proto</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/avro/MRClientProtocol.genavro b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/avro/MRClientProtocol.genavro
new file mode 100644
index 0000000..fdf98ab
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/avro/MRClientProtocol.genavro
@@ -0,0 +1,153 @@
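+// Avro IDL definition of the MRv2 client protocol: job/task/attempt IDs and
+// reports, counters, completion events, and the query/kill/fail operations.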
+@namespace("org.apache.hadoop.mapreduce.v2.api")
+protocol MRClientProtocol {
+
+ import idl "./yarn/yarn-api/src/main/avro/yarn-types.genavro";
+
+ enum TaskType {
+ MAP,
+ REDUCE
+ }
+
+ record JobID {
+ org.apache.hadoop.yarn.ApplicationID appID;
+ int id;
+ }
+
+ record TaskID {
+ JobID jobID;
+ TaskType taskType;
+ int id;
+ }
+
+ record TaskAttemptID {
+ TaskID taskID;
+ int id;
+ }
+
+ enum TaskState {
+ NEW,
+ SCHEDULED,
+ RUNNING,
+ SUCCEEDED,
+ FAILED,
+ KILL_WAIT,
+ KILLED
+ }
+
+ enum Phase {
+ STARTING,
+ MAP,
+ SHUFFLE,
+ SORT,
+ REDUCE,
+ CLEANUP
+ }
+
+ record Counter {
+ string name;
+ string displayName;
+ long value;
+ }
+
+ record CounterGroup {
+ string name;
+ string displayName;
+ map<Counter> counters;
+ }
+
+ record Counters {
+ map<CounterGroup> groups;
+ }
+
+ record TaskReport {
+ TaskID id;
+ TaskState state;
+ float progress;
+ long startTime;
+ long finishTime;
+ Counters counters;
+ array<TaskAttemptID> runningAttempts;
+ union{TaskAttemptID, null} successfulAttempt;
+ array<string> diagnostics;
+ }
+
+ enum TaskAttemptState {
+ NEW,
+ UNASSIGNED,
+ ASSIGNED,
+ RUNNING,
+ COMMIT_PENDING,
+ SUCCESS_CONTAINER_CLEANUP,
+ SUCCEEDED,
+ FAIL_CONTAINER_CLEANUP,
+ FAIL_TASK_CLEANUP,
+ FAILED,
+ KILL_CONTAINER_CLEANUP,
+ KILL_TASK_CLEANUP,
+ KILLED
+ }
+
+ record TaskAttemptReport {
+ TaskAttemptID id;
+ TaskAttemptState state;
+ float progress;
+ long startTime;
+ long finishTime;
+ Counters counters;
+ string diagnosticInfo;
+ string stateString;
+ Phase phase;
+ }
+
+ enum JobState {
+ NEW,
+ INITED,
+ RUNNING,
+ SUCCEEDED,
+ FAILED,
+ KILL_WAIT,
+ KILLED,
+ ERROR
+ }
+
+ record JobReport {
+ JobID id;
+ JobState state;
+ float mapProgress;
+ float reduceProgress;
+ float cleanupProgress;
+ float setupProgress;
+ long startTime;
+ long finishTime;
+ }
+
+ enum TaskAttemptCompletionEventStatus {
+ FAILED,
+ KILLED,
+ SUCCEEDED,
+ OBSOLETE,
+ TIPFAILED
+ }
+
+ record TaskAttemptCompletionEvent {
+ TaskAttemptID attemptId;
+ TaskAttemptCompletionEventStatus status;
+ string mapOutputServerAddress;
+ int attemptRunTime;
+ int eventId;
+ }
+
+ JobReport getJobReport(JobID jobID) throws org.apache.hadoop.yarn.YarnRemoteException;
+ TaskReport getTaskReport(TaskID taskID) throws org.apache.hadoop.yarn.YarnRemoteException;
+ TaskAttemptReport getTaskAttemptReport(TaskAttemptID taskAttemptID) throws org.apache.hadoop.yarn.YarnRemoteException;
+ Counters getCounters(JobID jobID) throws org.apache.hadoop.yarn.YarnRemoteException;
+ array<TaskAttemptCompletionEvent> getTaskAttemptCompletionEvents(JobID jobID, int fromEventId, int maxEvents) throws org.apache.hadoop.yarn.YarnRemoteException;
+ array<TaskReport> getTaskReports(JobID jobID, TaskType taskType) throws org.apache.hadoop.yarn.YarnRemoteException;
+ array<string> getDiagnostics(TaskAttemptID taskAttemptID) throws org.apache.hadoop.yarn.YarnRemoteException;
+
+ void killJob(JobID jobID) throws org.apache.hadoop.yarn.YarnRemoteException;
+ void killTask(TaskID taskID) throws org.apache.hadoop.yarn.YarnRemoteException;
+ void killTaskAttempt(TaskAttemptID taskAttemptID) throws org.apache.hadoop.yarn.YarnRemoteException;
+ void failTaskAttempt(TaskAttemptID taskAttemptID) throws org.apache.hadoop.yarn.YarnRemoteException;
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
new file mode 100644
index 0000000..f6819fd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
@@ -0,0 +1,459 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.hadoop.mapred.JobPriority;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
+import org.apache.hadoop.mapreduce.JobStatus.State;
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
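+/**
+ * Static helpers that convert between the legacy mapred/mapreduce client
+ * types and their MRv2 / YARN record counterparts, in both directions.
+ */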
+public class TypeConverter {
+
+ private static RecordFactory recordFactory;
+
+ static {
+ recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ }
+
+ public static org.apache.hadoop.mapred.JobID fromYarn(JobId id) {
+ String identifier = fromClusterTimeStamp(id.getAppId().getClusterTimestamp());
+ return new org.apache.hadoop.mapred.JobID(identifier, id.getId());
+ }
+
+ //currently there is 1-1 mapping between appid and jobid
+ public static org.apache.hadoop.mapreduce.JobID fromYarn(ApplicationId appID) {
+ String identifier = fromClusterTimeStamp(appID.getClusterTimestamp());
+ return new org.apache.hadoop.mapred.JobID(identifier, appID.getId());
+ }
+
+ public static JobId toYarn(org.apache.hadoop.mapreduce.JobID id) {
+ JobId jobId = recordFactory.newRecordInstance(JobId.class);
+ jobId.setId(id.getId()); //currently there is 1-1 mapping between appid and jobid
+
+ ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
+ appId.setId(id.getId());
+ appId.setClusterTimestamp(toClusterTimeStamp(id.getJtIdentifier()));
+ jobId.setAppId(appId);
+ return jobId;
+ }
+
+ private static String fromClusterTimeStamp(long clusterTimeStamp) {
+ return Long.toString(clusterTimeStamp);
+ }
+
+ private static long toClusterTimeStamp(String identifier) {
+ return Long.parseLong(identifier);
+ }
+
+ public static org.apache.hadoop.mapreduce.TaskType fromYarn(
+ TaskType taskType) {
+ switch (taskType) {
+ case MAP:
+ return org.apache.hadoop.mapreduce.TaskType.MAP;
+ case REDUCE:
+ return org.apache.hadoop.mapreduce.TaskType.REDUCE;
+ default:
+ throw new YarnException("Unrecognized task type: " + taskType);
+ }
+ }
+
+ public static TaskType
+ toYarn(org.apache.hadoop.mapreduce.TaskType taskType) {
+ switch (taskType) {
+ case MAP:
+ return TaskType.MAP;
+ case REDUCE:
+ return TaskType.REDUCE;
+ default:
+ throw new YarnException("Unrecognized task type: " + taskType);
+ }
+ }
+
+ public static org.apache.hadoop.mapred.TaskID fromYarn(TaskId id) {
+ return new org.apache.hadoop.mapred.TaskID(fromYarn(id.getJobId()), fromYarn(id.getTaskType()),
+ id.getId());
+ }
+
+ public static TaskId toYarn(org.apache.hadoop.mapreduce.TaskID id) {
+ TaskId taskId = recordFactory.newRecordInstance(TaskId.class);
+ taskId.setId(id.getId());
+ taskId.setTaskType(toYarn(id.getTaskType()));
+ taskId.setJobId(toYarn(id.getJobID()));
+ return taskId;
+ }
+
+ public static TaskAttemptState toYarn(org.apache.hadoop.mapred.TaskStatus.State state) {
+ if (state == org.apache.hadoop.mapred.TaskStatus.State.KILLED_UNCLEAN) {
+ return TaskAttemptState.KILLED;
+ }
+ if (state == org.apache.hadoop.mapred.TaskStatus.State.FAILED_UNCLEAN) {
+ return TaskAttemptState.FAILED;
+ }
+ return TaskAttemptState.valueOf(state.toString());
+ }
+
+ public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
+ switch (phase) {
+ case STARTING:
+ return Phase.STARTING;
+ case MAP:
+ return Phase.MAP;
+ case SHUFFLE:
+ return Phase.SHUFFLE;
+ case SORT:
+ return Phase.SORT;
+ case REDUCE:
+ return Phase.REDUCE;
+ case CLEANUP:
+ return Phase.CLEANUP;
+ }
+ throw new YarnException("Unrecognized Phase: " + phase);
+ }
+
+ public static TaskCompletionEvent[] fromYarn(
+ TaskAttemptCompletionEvent[] newEvents) {
+ TaskCompletionEvent[] oldEvents =
+ new TaskCompletionEvent[newEvents.length];
+ int i = 0;
+ for (TaskAttemptCompletionEvent newEvent
+ : newEvents) {
+ oldEvents[i++] = fromYarn(newEvent);
+ }
+ return oldEvents;
+ }
+
+ public static TaskCompletionEvent fromYarn(
+ TaskAttemptCompletionEvent newEvent) {
+ return new TaskCompletionEvent(newEvent.getEventId(),
+ fromYarn(newEvent.getAttemptId()), newEvent.getAttemptId().getId(),
+ newEvent.getAttemptId().getTaskId().getTaskType().equals(TaskType.MAP),
+ fromYarn(newEvent.getStatus()),
+ newEvent.getMapOutputServerAddress());
+ }
+
+ public static TaskCompletionEvent.Status fromYarn(
+ TaskAttemptCompletionEventStatus newStatus) {
+ switch (newStatus) {
+ case FAILED:
+ return TaskCompletionEvent.Status.FAILED;
+ case KILLED:
+ return TaskCompletionEvent.Status.KILLED;
+ case OBSOLETE:
+ return TaskCompletionEvent.Status.OBSOLETE;
+ case SUCCEEDED:
+ return TaskCompletionEvent.Status.SUCCEEDED;
+ case TIPFAILED:
+ return TaskCompletionEvent.Status.TIPFAILED;
+ }
+ throw new YarnException("Unrecognized status: " + newStatus);
+ }
+
+ public static org.apache.hadoop.mapred.TaskAttemptID fromYarn(
+ TaskAttemptId id) {
+ return new org.apache.hadoop.mapred.TaskAttemptID(fromYarn(id.getTaskId()),
+ id.getId());
+ }
+
+ public static TaskAttemptId toYarn(
+ org.apache.hadoop.mapred.TaskAttemptID id) {
+ TaskAttemptId taskAttemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
+ taskAttemptId.setTaskId(toYarn(id.getTaskID()));
+ taskAttemptId.setId(id.getId());
+ return taskAttemptId;
+ }
+
+ public static TaskAttemptId toYarn(
+ org.apache.hadoop.mapreduce.TaskAttemptID id) {
+ TaskAttemptId taskAttemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
+ taskAttemptId.setTaskId(toYarn(id.getTaskID()));
+ taskAttemptId.setId(id.getId());
+ return taskAttemptId;
+ }
+
+ public static org.apache.hadoop.mapreduce.Counters fromYarn(
+ Counters yCntrs) {
+ if (yCntrs == null) {
+ return null;
+ }
+ org.apache.hadoop.mapreduce.Counters counters =
+ new org.apache.hadoop.mapreduce.Counters();
+ for (CounterGroup yGrp : yCntrs.getAllCounterGroups().values()) {
+ counters.addGroup(yGrp.getName(), yGrp.getDisplayName());
+ for (Counter yCntr : yGrp.getAllCounters().values()) {
+ org.apache.hadoop.mapreduce.Counter c =
+ counters.findCounter(yGrp.getName(),
+ yCntr.getName());
+ c.setValue(yCntr.getValue());
+ }
+ }
+ return counters;
+ }
+
+ public static Counters toYarn(org.apache.hadoop.mapred.Counters counters) {
+ if (counters == null) {
+ return null;
+ }
+ Counters yCntrs = recordFactory.newRecordInstance(Counters.class);
+ yCntrs.addAllCounterGroups(new HashMap<String, CounterGroup>());
+ for (org.apache.hadoop.mapred.Counters.Group grp : counters) {
+ CounterGroup yGrp = recordFactory.newRecordInstance(CounterGroup.class);
+ yGrp.setName(grp.getName());
+ yGrp.setDisplayName(grp.getDisplayName());
+ yGrp.addAllCounters(new HashMap<String, Counter>());
+ for (org.apache.hadoop.mapred.Counters.Counter cntr : grp) {
+ Counter yCntr = recordFactory.newRecordInstance(Counter.class);
+ yCntr.setName(cntr.getName());
+ yCntr.setDisplayName(cntr.getDisplayName());
+ yCntr.setValue(cntr.getValue());
+ yGrp.setCounter(yCntr.getName(), yCntr);
+ }
+ yCntrs.setCounterGroup(yGrp.getName(), yGrp);
+ }
+ return yCntrs;
+ }
+
+ public static Counters toYarn(org.apache.hadoop.mapreduce.Counters counters) {
+ if (counters == null) {
+ return null;
+ }
+ Counters yCntrs = recordFactory.newRecordInstance(Counters.class);
+ yCntrs.addAllCounterGroups(new HashMap<String, CounterGroup>());
+ for (org.apache.hadoop.mapreduce.CounterGroup grp : counters) {
+ CounterGroup yGrp = recordFactory.newRecordInstance(CounterGroup.class);
+ yGrp.setName(grp.getName());
+ yGrp.setDisplayName(grp.getDisplayName());
+ yGrp.addAllCounters(new HashMap<String, Counter>());
+ for (org.apache.hadoop.mapreduce.Counter cntr : grp) {
+ Counter yCntr = recordFactory.newRecordInstance(Counter.class);
+ yCntr.setName(cntr.getName());
+ yCntr.setDisplayName(cntr.getDisplayName());
+ yCntr.setValue(cntr.getValue());
+ yGrp.setCounter(yCntr.getName(), yCntr);
+ }
+ yCntrs.setCounterGroup(yGrp.getName(), yGrp);
+ }
+ return yCntrs;
+ }
+
+ public static org.apache.hadoop.mapred.JobStatus fromYarn(
+ JobReport jobreport, String jobFile, String trackingUrl) {
+ String user = null, jobName = null;
+ JobPriority jobPriority = JobPriority.NORMAL;
+ return new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()),
+ jobreport.getSetupProgress(), jobreport.getMapProgress(),
+ jobreport.getReduceProgress(), jobreport.getCleanupProgress(),
+ fromYarn(jobreport.getJobState()),
+ jobPriority, user, jobName, jobFile, trackingUrl);
+ }
+
+ public static int fromYarn(JobState state) {
+ switch (state) {
+ case NEW:
+ case INITED:
+ return org.apache.hadoop.mapred.JobStatus.PREP;
+ case RUNNING:
+ return org.apache.hadoop.mapred.JobStatus.RUNNING;
+ case KILL_WAIT:
+ case KILLED:
+ return org.apache.hadoop.mapred.JobStatus.KILLED;
+ case SUCCEEDED:
+ return org.apache.hadoop.mapred.JobStatus.SUCCEEDED;
+ case FAILED:
+ case ERROR:
+ return org.apache.hadoop.mapred.JobStatus.FAILED;
+ }
+ throw new YarnException("Unrecognized job state: " + state);
+ }
+
+ public static org.apache.hadoop.mapred.TIPStatus fromYarn(
+ TaskState state) {
+ switch (state) {
+ case NEW:
+ case SCHEDULED:
+ return org.apache.hadoop.mapred.TIPStatus.PENDING;
+ case RUNNING:
+ return org.apache.hadoop.mapred.TIPStatus.RUNNING;
+ case KILL_WAIT:
+ case KILLED:
+ return org.apache.hadoop.mapred.TIPStatus.KILLED;
+ case SUCCEEDED:
+ return org.apache.hadoop.mapred.TIPStatus.COMPLETE;
+ case FAILED:
+ return org.apache.hadoop.mapred.TIPStatus.FAILED;
+ }
+ throw new YarnException("Unrecognized task state: " + state);
+ }
+
+ public static TaskReport fromYarn(org.apache.hadoop.mapreduce.v2.api.records.TaskReport report) {
+ String[] diagnostics = null;
+ if (report.getDiagnosticsList() != null) {
+ diagnostics = new String[report.getDiagnosticsCount()];
+ int i = 0;
+ for (String cs : report.getDiagnosticsList()) {
+ diagnostics[i++] = cs.toString();
+ }
+ } else {
+ diagnostics = new String[0];
+ }
+
+ TaskReport rep = new TaskReport(fromYarn(report.getTaskId()),
+ report.getProgress(), report.getTaskState().toString(),
+ diagnostics, fromYarn(report.getTaskState()), report.getStartTime(), report.getFinishTime(),
+ fromYarn(report.getCounters()));
+ List<org.apache.hadoop.mapreduce.TaskAttemptID> runningAtts
+ = new ArrayList<org.apache.hadoop.mapreduce.TaskAttemptID>();
+ for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId id
+ : report.getRunningAttemptsList()) {
+ runningAtts.add(fromYarn(id));
+ }
+ rep.setRunningTaskAttemptIds(runningAtts);
+ if (report.getSuccessfulAttempt() != null) {
+ rep.setSuccessfulAttemptId(fromYarn(report.getSuccessfulAttempt()));
+ }
+ return rep;
+ }
+
+ public static List<TaskReport> fromYarn(
+ List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports) {
+ List<TaskReport> reports = new ArrayList<TaskReport>();
+ for (org.apache.hadoop.mapreduce.v2.api.records.TaskReport r : taskReports) {
+ reports.add(fromYarn(r));
+ }
+ return reports;
+ }
+
+ public static JobStatus.State fromYarn(ApplicationState state) {
+ switch (state) {
+ case SUBMITTED:
+ return State.PREP;
+ case RUNNING:
+ case RESTARTING:
+ return State.RUNNING;
+ case SUCCEEDED:
+ return State.SUCCEEDED;
+ case FAILED:
+ return State.FAILED;
+ case KILLED:
+ return State.KILLED;
+ }
+ throw new YarnException("Unrecognized application state: " + state);
+ }
+
+ private static final String TT_NAME_PREFIX = "tracker_";
+ public static TaskTrackerInfo fromYarn(NodeReport node) {
+ TaskTrackerInfo taskTracker =
+ new TaskTrackerInfo(TT_NAME_PREFIX + node.getNodeId().toString());
+ return taskTracker;
+ }
+
+ public static TaskTrackerInfo[] fromYarnNodes(List<NodeReport> nodes) {
+ List<TaskTrackerInfo> taskTrackers = new ArrayList<TaskTrackerInfo>();
+ for (NodeReport node : nodes) {
+ taskTrackers.add(fromYarn(node));
+ }
+ return taskTrackers.toArray(new TaskTrackerInfo[nodes.size()]);
+ }
+
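+ // An ApplicationReport carries no per-phase progress, so the setup, map,
+ // reduce and cleanup progress fields are all reported as 0.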
+ public static JobStatus fromYarn(ApplicationReport application) {
+ String trackingUrl = application.getTrackingUrl();
+ trackingUrl = trackingUrl == null ? "" : trackingUrl;
+
+ JobStatus jobStatus =
+ new JobStatus(
+ TypeConverter.fromYarn(application.getApplicationId()),
+ 0.0f, 0.0f, 0.0f, 0.0f,
+ TypeConverter.fromYarn(application.getState()),
+ org.apache.hadoop.mapreduce.JobPriority.NORMAL,
+ application.getUser(), application.getName(),
+ application.getQueue(), "", trackingUrl
+ );
+ jobStatus.setSchedulingInfo(trackingUrl); // also expose the AM tracking URL as scheduling info
+ return jobStatus;
+ }
+
+ public static JobStatus[] fromYarnApps(List<ApplicationReport> applications) {
+ List<JobStatus> jobStatuses = new ArrayList<JobStatus>();
+ for (ApplicationReport application : applications) {
+ jobStatuses.add(TypeConverter.fromYarn(application));
+ }
+ return jobStatuses.toArray(new JobStatus[jobStatuses.size()]);
+ }
+
+ public static QueueInfo fromYarn(org.apache.hadoop.yarn.api.records.QueueInfo
+ queueInfo) {
+ return new QueueInfo(queueInfo.getQueueName(),
+ queueInfo.toString(), QueueState.RUNNING,
+ TypeConverter.fromYarnApps(queueInfo.getApplications()));
+ }
+
+ public static QueueInfo[] fromYarnQueueInfo(
+ List<org.apache.hadoop.yarn.api.records.QueueInfo> queues) {
+ List<QueueInfo> queueInfos = new ArrayList<QueueInfo>(queues.size());
+ for (org.apache.hadoop.yarn.api.records.QueueInfo queue : queues) {
+ queueInfos.add(TypeConverter.fromYarn(queue));
+ }
+ return queueInfos.toArray(new QueueInfo[queueInfos.size()]);
+ }
+
+ public static QueueAclsInfo[] fromYarnQueueUserAclsInfo(
+ List<QueueUserACLInfo> userAcls) {
+ List<QueueAclsInfo> acls = new ArrayList<QueueAclsInfo>();
+ for (QueueUserACLInfo aclInfo : userAcls) {
+ List<String> operations = new ArrayList<String>();
+ for (QueueACL qAcl : aclInfo.getUserAcls()) {
+ operations.add(qAcl.toString());
+ }
+
+ QueueAclsInfo acl =
+ new QueueAclsInfo(aclInfo.getQueueName(),
+ operations.toArray(new String[operations.size()]));
+ acls.add(acl);
+ }
+ return acls.toArray(new QueueAclsInfo[acls.size()]);
+ }
+
+}
+
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java
new file mode 100644
index 0000000..0f919db
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java
@@ -0,0 +1,55 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2;
+
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface MRConstants {
+
+ public static final String YARN_MR_PREFIX = "yarn.mapreduce.job.";
+
+ // This should be the directory where the splits file is localized on the
+ // node running the ApplicationMaster.
+ public static final String JOB_SUBMIT_DIR = "jobSubmitDir";
+
+ // This should be the name of the localized job-configuration file on the
+ // nodes running the ApplicationMaster and the tasks.
+ public static final String JOB_CONF_FILE = "job.xml";
+ // This should be the name of the localized job-jar file on the nodes
+ // running individual containers/tasks.
+ public static final String JOB_JAR = "job.jar";
+
+ public static final String HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME =
+ "hadoop-mapreduce-client-app-1.0-SNAPSHOT.jar";
+
+ public static final String YARN_MAPREDUCE_APP_JAR_PATH =
+ "$YARN_HOME/modules/" + HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME;
+
+ public static final String APPS_STAGING_DIR_KEY = "yarn.apps.stagingDir";
+
+ // The token file for the application. It should contain the tokens needed
+ // to access the remote file system and may optionally contain
+ // application-specific tokens. For now, it is generated by the AppManagers
+ // and used by the NodeManagers and the Containers.
+ public static final String APPLICATION_TOKENS_FILE = "appTokens";
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
new file mode 100644
index 0000000..ffbfef9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
@@ -0,0 +1,39 @@
+package org.apache.hadoop.mapreduce.v2.api;
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+
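+/**
+ * Protocol used by MapReduce clients to query and control a job: fetching
+ * job, task and task-attempt reports, counters, diagnostics and completion
+ * events, and killing or failing jobs, tasks and task attempts. It is
+ * typically served by the MapReduce ApplicationMaster.
+ */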
+public interface MRClientProtocol {
+ public GetJobReportResponse getJobReport(GetJobReportRequest request) throws YarnRemoteException;
+ public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) throws YarnRemoteException;
+ public GetTaskAttemptReportResponse getTaskAttemptReport(GetTaskAttemptReportRequest request) throws YarnRemoteException;
+ public GetCountersResponse getCounters(GetCountersRequest request) throws YarnRemoteException;
+ public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(GetTaskAttemptCompletionEventsRequest request) throws YarnRemoteException;
+ public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request) throws YarnRemoteException;
+ public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request) throws YarnRemoteException;
+ public KillJobResponse killJob(KillJobRequest request) throws YarnRemoteException;
+ public KillTaskResponse killTask(KillTaskRequest request) throws YarnRemoteException;
+ public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) throws YarnRemoteException;
+ public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) throws YarnRemoteException;
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
new file mode 100644
index 0000000..5c2fdfc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
@@ -0,0 +1,268 @@
+package org.apache.hadoop.mapreduce.v2.api.impl.pb.client;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptCompletionEventsRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptCompletionEventsResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptReportRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptReportResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportsRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportsResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillJobRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillJobResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
+import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService;
+
+import com.google.protobuf.ServiceException;
+
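+/**
+ * Client-side protocol buffer binding of MRClientProtocol: each call is
+ * converted to its request proto and sent over ProtoOverHadoopRpcEngine.
+ * ServiceExceptions thrown by the proxy are unwrapped so callers see the
+ * original YarnRemoteException (or UndeclaredThrowableException) instead.
+ */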
+public class MRClientProtocolPBClientImpl implements MRClientProtocol {
+
+ private MRClientProtocolService.BlockingInterface proxy;
+
+ public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
+ RPC.setProtocolEngine(conf, MRClientProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
+ proxy = (MRClientProtocolService.BlockingInterface)RPC.getProxy(
+ MRClientProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ }
+
+ @Override
+ public GetJobReportResponse getJobReport(GetJobReportRequest request)
+ throws YarnRemoteException {
+ GetJobReportRequestProto requestProto = ((GetJobReportRequestPBImpl)request).getProto();
+ try {
+ return new GetJobReportResponsePBImpl(proxy.getJobReport(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
+ throws YarnRemoteException {
+ GetTaskReportRequestProto requestProto = ((GetTaskReportRequestPBImpl)request).getProto();
+ try {
+ return new GetTaskReportResponsePBImpl(proxy.getTaskReport(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetTaskAttemptReportResponse getTaskAttemptReport(
+ GetTaskAttemptReportRequest request) throws YarnRemoteException {
+ GetTaskAttemptReportRequestProto requestProto = ((GetTaskAttemptReportRequestPBImpl)request).getProto();
+ try {
+ return new GetTaskAttemptReportResponsePBImpl(proxy.getTaskAttemptReport(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetCountersResponse getCounters(GetCountersRequest request)
+ throws YarnRemoteException {
+ GetCountersRequestProto requestProto = ((GetCountersRequestPBImpl)request).getProto();
+ try {
+ return new GetCountersResponsePBImpl(proxy.getCounters(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(
+ GetTaskAttemptCompletionEventsRequest request) throws YarnRemoteException {
+ GetTaskAttemptCompletionEventsRequestProto requestProto = ((GetTaskAttemptCompletionEventsRequestPBImpl)request).getProto();
+ try {
+ return new GetTaskAttemptCompletionEventsResponsePBImpl(proxy.getTaskAttemptCompletionEvents(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
+ throws YarnRemoteException {
+ GetTaskReportsRequestProto requestProto = ((GetTaskReportsRequestPBImpl)request).getProto();
+ try {
+ return new GetTaskReportsResponsePBImpl(proxy.getTaskReports(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request)
+ throws YarnRemoteException {
+ GetDiagnosticsRequestProto requestProto = ((GetDiagnosticsRequestPBImpl)request).getProto();
+ try {
+ return new GetDiagnosticsResponsePBImpl(proxy.getDiagnostics(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public KillJobResponse killJob(KillJobRequest request)
+ throws YarnRemoteException {
+ KillJobRequestProto requestProto = ((KillJobRequestPBImpl)request).getProto();
+ try {
+ return new KillJobResponsePBImpl(proxy.killJob(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public KillTaskResponse killTask(KillTaskRequest request)
+ throws YarnRemoteException {
+ KillTaskRequestProto requestProto = ((KillTaskRequestPBImpl)request).getProto();
+ try {
+ return new KillTaskResponsePBImpl(proxy.killTask(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request)
+ throws YarnRemoteException {
+ KillTaskAttemptRequestProto requestProto = ((KillTaskAttemptRequestPBImpl)request).getProto();
+ try {
+ return new KillTaskAttemptResponsePBImpl(proxy.killTaskAttempt(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request)
+ throws YarnRemoteException {
+ FailTaskAttemptRequestProto requestProto = ((FailTaskAttemptRequestPBImpl)request).getProto();
+ try {
+ return new FailTaskAttemptResponsePBImpl(proxy.failTaskAttempt(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
new file mode 100644
index 0000000..7f436e3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
@@ -0,0 +1,218 @@
+package org.apache.hadoop.mapreduce.v2.api.impl.pb.service;
+
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptCompletionEventsRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptCompletionEventsResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptReportRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptReportResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportsRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportsResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillJobRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillJobResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService.BlockingInterface;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
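+/**
+ * Server-side counterpart of the PB client: wraps each incoming request
+ * proto in its record implementation, delegates to the real
+ * MRClientProtocol, and re-wraps any YarnRemoteException as a
+ * ServiceException for the RPC layer.
+ */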
+public class MRClientProtocolPBServiceImpl implements BlockingInterface {
+
+ private MRClientProtocol real;
+
+ public MRClientProtocolPBServiceImpl(MRClientProtocol impl) {
+ this.real = impl;
+ }
+
+ @Override
+ public GetJobReportResponseProto getJobReport(RpcController controller,
+ GetJobReportRequestProto proto) throws ServiceException {
+ GetJobReportRequestPBImpl request = new GetJobReportRequestPBImpl(proto);
+ try {
+ GetJobReportResponse response = real.getJobReport(request);
+ return ((GetJobReportResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetTaskReportResponseProto getTaskReport(RpcController controller,
+ GetTaskReportRequestProto proto) throws ServiceException {
+ GetTaskReportRequest request = new GetTaskReportRequestPBImpl(proto);
+ try {
+ GetTaskReportResponse response = real.getTaskReport(request);
+ return ((GetTaskReportResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetTaskAttemptReportResponseProto getTaskAttemptReport(
+ RpcController controller, GetTaskAttemptReportRequestProto proto)
+ throws ServiceException {
+ GetTaskAttemptReportRequest request = new GetTaskAttemptReportRequestPBImpl(proto);
+ try {
+ GetTaskAttemptReportResponse response = real.getTaskAttemptReport(request);
+ return ((GetTaskAttemptReportResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetCountersResponseProto getCounters(RpcController controller,
+ GetCountersRequestProto proto) throws ServiceException {
+ GetCountersRequest request = new GetCountersRequestPBImpl(proto);
+ try {
+ GetCountersResponse response = real.getCounters(request);
+ return ((GetCountersResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetTaskAttemptCompletionEventsResponseProto getTaskAttemptCompletionEvents(
+ RpcController controller,
+ GetTaskAttemptCompletionEventsRequestProto proto)
+ throws ServiceException {
+ GetTaskAttemptCompletionEventsRequest request = new GetTaskAttemptCompletionEventsRequestPBImpl(proto);
+ try {
+ GetTaskAttemptCompletionEventsResponse response = real.getTaskAttemptCompletionEvents(request);
+ return ((GetTaskAttemptCompletionEventsResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetTaskReportsResponseProto getTaskReports(RpcController controller,
+ GetTaskReportsRequestProto proto) throws ServiceException {
+ GetTaskReportsRequest request = new GetTaskReportsRequestPBImpl(proto);
+ try {
+ GetTaskReportsResponse response = real.getTaskReports(request);
+ return ((GetTaskReportsResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetDiagnosticsResponseProto getDiagnostics(RpcController controller,
+ GetDiagnosticsRequestProto proto) throws ServiceException {
+ GetDiagnosticsRequest request = new GetDiagnosticsRequestPBImpl(proto);
+ try {
+ GetDiagnosticsResponse response = real.getDiagnostics(request);
+ return ((GetDiagnosticsResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public KillJobResponseProto killJob(RpcController controller,
+ KillJobRequestProto proto) throws ServiceException {
+ KillJobRequest request = new KillJobRequestPBImpl(proto);
+ try {
+ KillJobResponse response = real.killJob(request);
+ return ((KillJobResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public KillTaskResponseProto killTask(RpcController controller,
+ KillTaskRequestProto proto) throws ServiceException {
+ KillTaskRequest request = new KillTaskRequestPBImpl(proto);
+ try {
+ KillTaskResponse response = real.killTask(request);
+ return ((KillTaskResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public KillTaskAttemptResponseProto killTaskAttempt(RpcController controller,
+ KillTaskAttemptRequestProto proto) throws ServiceException {
+ KillTaskAttemptRequest request = new KillTaskAttemptRequestPBImpl(proto);
+ try {
+ KillTaskAttemptResponse response = real.killTaskAttempt(request);
+ return ((KillTaskAttemptResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public FailTaskAttemptResponseProto failTaskAttempt(RpcController controller,
+ FailTaskAttemptRequestProto proto) throws ServiceException {
+ FailTaskAttemptRequest request = new FailTaskAttemptRequestPBImpl(proto);
+ try {
+ FailTaskAttemptResponse response = real.failTaskAttempt(request);
+ return ((FailTaskAttemptResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptRequest.java
new file mode 100644
index 0000000..2f0404d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+public interface FailTaskAttemptRequest {
+ public abstract TaskAttemptId getTaskAttemptId();
+
+ public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptResponse.java
new file mode 100644
index 0000000..2c11da6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptResponse.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+public interface FailTaskAttemptResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersRequest.java
new file mode 100644
index 0000000..f3a9b36
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersRequest.java
@@ -0,0 +1,10 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+public interface GetCountersRequest {
+ public abstract JobId getJobId();
+
+ public abstract void setJobId(JobId jobId);
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersResponse.java
new file mode 100644
index 0000000..ada0195
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersResponse.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+
+public interface GetCountersResponse {
+ public abstract Counters getCounters();
+
+ public abstract void setCounters(Counters counters);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsRequest.java
new file mode 100644
index 0000000..16e6d9a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+public interface GetDiagnosticsRequest {
+ public abstract TaskAttemptId getTaskAttemptId();
+
+ public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsResponse.java
new file mode 100644
index 0000000..ff59833
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsResponse.java
@@ -0,0 +1,15 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import java.util.List;
+
+public interface GetDiagnosticsResponse {
+ public abstract List<String> getDiagnosticsList();
+ public abstract String getDiagnostics(int index);
+ public abstract int getDiagnosticsCount();
+
+ public abstract void addAllDiagnostics(List<String> diagnostics);
+ public abstract void addDiagnostics(String diagnostic);
+ public abstract void removeDiagnostics(int index);
+ public abstract void clearDiagnostics();
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportRequest.java
new file mode 100644
index 0000000..aaf6b2c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+public interface GetJobReportRequest {
+ public abstract JobId getJobId();
+
+ public abstract void setJobId(JobId jobId);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportResponse.java
new file mode 100644
index 0000000..4275e65
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportResponse.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+
+public interface GetJobReportResponse {
+ public abstract JobReport getJobReport();
+
+ public abstract void setJobReport(JobReport jobReport);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsRequest.java
new file mode 100644
index 0000000..9babdc4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsRequest.java
@@ -0,0 +1,13 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+public interface GetTaskAttemptCompletionEventsRequest {
+ public abstract JobId getJobId();
+ public abstract int getFromEventId();
+ public abstract int getMaxEvents();
+
+ public abstract void setJobId(JobId jobId);
+ public abstract void setFromEventId(int id);
+ public abstract void setMaxEvents(int maxEvents);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsResponse.java
new file mode 100644
index 0000000..655d4e8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsResponse.java
@@ -0,0 +1,16 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+
+public interface GetTaskAttemptCompletionEventsResponse {
+ public abstract List<TaskAttemptCompletionEvent> getCompletionEventList();
+ public abstract TaskAttemptCompletionEvent getCompletionEvent(int index);
+ public abstract int getCompletionEventCount();
+
+ public abstract void addAllCompletionEvents(List<TaskAttemptCompletionEvent> eventList);
+ public abstract void addCompletionEvent(TaskAttemptCompletionEvent event);
+ public abstract void removeCompletionEvent(int index);
+ public abstract void clearCompletionEvents();
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportRequest.java
new file mode 100644
index 0000000..fb18d00
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+public interface GetTaskAttemptReportRequest {
+ public abstract TaskAttemptId getTaskAttemptId();
+
+ public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportResponse.java
new file mode 100644
index 0000000..968de23
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportResponse.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+
+public interface GetTaskAttemptReportResponse {
+ public abstract TaskAttemptReport getTaskAttemptReport();
+
+ public abstract void setTaskAttemptReport(TaskAttemptReport taskAttemptReport);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportRequest.java
new file mode 100644
index 0000000..da0dad0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+
+public interface GetTaskReportRequest {
+ public abstract TaskId getTaskId();
+
+ public abstract void setTaskId(TaskId taskId);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportResponse.java
new file mode 100644
index 0000000..06949fa
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportResponse.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+
+public interface GetTaskReportResponse {
+ public abstract TaskReport getTaskReport();
+
+ public abstract void setTaskReport(TaskReport taskReport);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsRequest.java
new file mode 100644
index 0000000..4112c98
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsRequest.java
@@ -0,0 +1,13 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+
+public interface GetTaskReportsRequest {
+
+ public abstract JobId getJobId();
+ public abstract TaskType getTaskType();
+
+ public abstract void setJobId(JobId jobId);
+ public abstract void setTaskType(TaskType taskType);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsResponse.java
new file mode 100644
index 0000000..b2fd204
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsResponse.java
@@ -0,0 +1,16 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+
+public interface GetTaskReportsResponse {
+ public abstract List<TaskReport> getTaskReportList();
+ public abstract TaskReport getTaskReport(int index);
+ public abstract int getTaskReportCount();
+
+ public abstract void addAllTaskReports(List<TaskReport> taskReports);
+ public abstract void addTaskReport(TaskReport taskReport);
+ public abstract void removeTaskReport(int index);
+ public abstract void clearTaskReports();
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobRequest.java
new file mode 100644
index 0000000..0dcd394
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+public interface KillJobRequest {
+ public abstract JobId getJobId();
+
+ public abstract void setJobId(JobId jobId);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobResponse.java
new file mode 100644
index 0000000..fcb7b59
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobResponse.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+public interface KillJobResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptRequest.java
new file mode 100644
index 0000000..e05de27
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+public interface KillTaskAttemptRequest {
+ public abstract TaskAttemptId getTaskAttemptId();
+
+ public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptResponse.java
new file mode 100644
index 0000000..5f5d5e8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptResponse.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+public interface KillTaskAttemptResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskRequest.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskRequest.java
new file mode 100644
index 0000000..f1b77c6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+
+public interface KillTaskRequest {
+ public abstract TaskId getTaskId();
+
+ public abstract void setTaskId(TaskId taskId);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskResponse.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskResponse.java
new file mode 100644
index 0000000..d7be875
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskResponse.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+public interface KillTaskResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptRequestPBImpl.java
new file mode 100644
index 0000000..493afc5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptRequestPBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
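+/**
+ * Like the other PB record implementations, this class holds its state
+ * either as an immutable proto or as a mutable builder plus locally cached
+ * record objects. The viaProto flag tracks which representation is current,
+ * and getProto() merges local changes back into the proto before returning.
+ */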
+public class FailTaskAttemptRequestPBImpl extends ProtoBase<FailTaskAttemptRequestProto> implements FailTaskAttemptRequest {
+ FailTaskAttemptRequestProto proto = FailTaskAttemptRequestProto.getDefaultInstance();
+ FailTaskAttemptRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskAttemptId taskAttemptId = null;
+
+
+ public FailTaskAttemptRequestPBImpl() {
+ builder = FailTaskAttemptRequestProto.newBuilder();
+ }
+
+ public FailTaskAttemptRequestPBImpl(FailTaskAttemptRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public FailTaskAttemptRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskAttemptId != null) {
+ builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = FailTaskAttemptRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public TaskAttemptId getTaskAttemptId() {
+ FailTaskAttemptRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskAttemptId != null) {
+ return this.taskAttemptId;
+ }
+ if (!p.hasTaskAttemptId()) {
+ return null;
+ }
+ this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
+ return this.taskAttemptId;
+ }
+
+ @Override
+ public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
+ maybeInitBuilder();
+ if (taskAttemptId == null)
+ builder.clearTaskAttemptId();
+ this.taskAttemptId = taskAttemptId;
+ }
+
+ private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
+ return new TaskAttemptIdPBImpl(p);
+ }
+
+ private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
+ return ((TaskAttemptIdPBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptResponsePBImpl.java
new file mode 100644
index 0000000..5eb4eff
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptResponsePBImpl.java
@@ -0,0 +1,39 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
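+// Protobuf-backed implementation of FailTaskAttemptResponse. The record
+// exposes no fields, so only the raw proto round-trips.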
+public class FailTaskAttemptResponsePBImpl extends ProtoBase<FailTaskAttemptResponseProto> implements FailTaskAttemptResponse {
+ FailTaskAttemptResponseProto proto = FailTaskAttemptResponseProto.getDefaultInstance();
+ FailTaskAttemptResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public FailTaskAttemptResponsePBImpl() {
+ builder = FailTaskAttemptResponseProto.newBuilder();
+ }
+
+ public FailTaskAttemptResponsePBImpl(FailTaskAttemptResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public FailTaskAttemptResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = FailTaskAttemptResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersRequestPBImpl.java
new file mode 100644
index 0000000..aeebc69
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersRequestPBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
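+// Protobuf-backed implementation of GetCountersRequest: caches the JobId
+// locally and merges it back into the proto on getProto().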
+public class GetCountersRequestPBImpl extends ProtoBase<GetCountersRequestProto> implements GetCountersRequest {
+ GetCountersRequestProto proto = GetCountersRequestProto.getDefaultInstance();
+ GetCountersRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private JobId jobId = null;
+
+
+ public GetCountersRequestPBImpl() {
+ builder = GetCountersRequestProto.newBuilder();
+ }
+
+ public GetCountersRequestPBImpl(GetCountersRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetCountersRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.jobId != null) {
+ builder.setJobId(convertToProtoFormat(this.jobId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetCountersRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public JobId getJobId() {
+ GetCountersRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.jobId != null) {
+ return this.jobId;
+ }
+ if (!p.hasJobId()) {
+ return null;
+ }
+ this.jobId = convertFromProtoFormat(p.getJobId());
+ return this.jobId;
+ }
+
+ @Override
+ public void setJobId(JobId jobId) {
+ maybeInitBuilder();
+ if (jobId == null)
+ builder.clearJobId();
+ this.jobId = jobId;
+ }
+
+ private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
+ return new JobIdPBImpl(p);
+ }
+
+ private JobIdProto convertToProtoFormat(JobId t) {
+ return ((JobIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersResponsePBImpl.java
new file mode 100644
index 0000000..b86641a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersResponsePBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.CountersPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
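+// Protobuf-backed implementation of GetCountersResponse: caches the Counters
+// record locally and merges it back into the proto on getProto().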
+public class GetCountersResponsePBImpl extends ProtoBase<GetCountersResponseProto> implements GetCountersResponse {
+ GetCountersResponseProto proto = GetCountersResponseProto.getDefaultInstance();
+ GetCountersResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private Counters counters = null;
+
+
+ public GetCountersResponsePBImpl() {
+ builder = GetCountersResponseProto.newBuilder();
+ }
+
+ public GetCountersResponsePBImpl(GetCountersResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetCountersResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.counters != null) {
+ builder.setCounters(convertToProtoFormat(this.counters));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetCountersResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public Counters getCounters() {
+ GetCountersResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.counters != null) {
+ return this.counters;
+ }
+ if (!p.hasCounters()) {
+ return null;
+ }
+ this.counters = convertFromProtoFormat(p.getCounters());
+ return this.counters;
+ }
+
+ @Override
+ public void setCounters(Counters counters) {
+ maybeInitBuilder();
+ if (counters == null)
+ builder.clearCounters();
+ this.counters = counters;
+ }
+
+ private CountersPBImpl convertFromProtoFormat(CountersProto p) {
+ return new CountersPBImpl(p);
+ }
+
+ private CountersProto convertToProtoFormat(Counters t) {
+ return ((CountersPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsRequestPBImpl.java
new file mode 100644
index 0000000..993b557
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsRequestPBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
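+// Protobuf-backed implementation of GetDiagnosticsRequest: caches the
+// TaskAttemptId locally and merges it back into the proto on getProto().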
+public class GetDiagnosticsRequestPBImpl extends ProtoBase<GetDiagnosticsRequestProto> implements GetDiagnosticsRequest {
+ GetDiagnosticsRequestProto proto = GetDiagnosticsRequestProto.getDefaultInstance();
+ GetDiagnosticsRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskAttemptId taskAttemptId = null;
+
+
+ public GetDiagnosticsRequestPBImpl() {
+ builder = GetDiagnosticsRequestProto.newBuilder();
+ }
+
+ public GetDiagnosticsRequestPBImpl(GetDiagnosticsRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetDiagnosticsRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskAttemptId != null) {
+ builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetDiagnosticsRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public TaskAttemptId getTaskAttemptId() {
+ GetDiagnosticsRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskAttemptId != null) {
+ return this.taskAttemptId;
+ }
+ if (!p.hasTaskAttemptId()) {
+ return null;
+ }
+ this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
+ return this.taskAttemptId;
+ }
+
+ @Override
+ public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
+ maybeInitBuilder();
+ if (taskAttemptId == null)
+ builder.clearTaskAttemptId();
+ this.taskAttemptId = taskAttemptId;
+ }
+
+ private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
+ return new TaskAttemptIdPBImpl(p);
+ }
+
+ private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
+ return ((TaskAttemptIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsResponsePBImpl.java
new file mode 100644
index 0000000..b829fe3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsResponsePBImpl.java
@@ -0,0 +1,122 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
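+// Protobuf-backed implementation of GetDiagnosticsResponse: materializes the
+// repeated diagnostics field into a local list on first read.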
+public class GetDiagnosticsResponsePBImpl extends ProtoBase<GetDiagnosticsResponseProto> implements GetDiagnosticsResponse {
+ GetDiagnosticsResponseProto proto = GetDiagnosticsResponseProto.getDefaultInstance();
+ GetDiagnosticsResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private List<String> diagnostics = null;
+
+
+ public GetDiagnosticsResponsePBImpl() {
+ builder = GetDiagnosticsResponseProto.newBuilder();
+ }
+
+ public GetDiagnosticsResponsePBImpl(GetDiagnosticsResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetDiagnosticsResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.diagnostics != null) {
+ addDiagnosticsToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetDiagnosticsResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public List<String> getDiagnosticsList() {
+ initDiagnostics();
+ return this.diagnostics;
+ }
+ @Override
+ public String getDiagnostics(int index) {
+ initDiagnostics();
+ return this.diagnostics.get(index);
+ }
+ @Override
+ public int getDiagnosticsCount() {
+ initDiagnostics();
+ return this.diagnostics.size();
+ }
+
+ private void initDiagnostics() {
+ if (this.diagnostics != null) {
+ return;
+ }
+ GetDiagnosticsResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<String> list = p.getDiagnosticsList();
+ this.diagnostics = new ArrayList<String>();
+
+ for (String c : list) {
+ this.diagnostics.add(c);
+ }
+ }
+
+ @Override
+ public void addAllDiagnostics(final List<String> diagnostics) {
+ if (diagnostics == null)
+ return;
+ initDiagnostics();
+ this.diagnostics.addAll(diagnostics);
+ }
+
+ private void addDiagnosticsToProto() {
+ maybeInitBuilder();
+ builder.clearDiagnostics();
+ if (diagnostics == null)
+ return;
+ builder.addAllDiagnostics(diagnostics);
+ }
+ @Override
+  public void addDiagnostics(String diagnostic) {
+    initDiagnostics();
+    this.diagnostics.add(diagnostic);
+ }
+ @Override
+ public void removeDiagnostics(int index) {
+ initDiagnostics();
+ this.diagnostics.remove(index);
+ }
+ @Override
+ public void clearDiagnostics() {
+ initDiagnostics();
+ this.diagnostics.clear();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportRequestPBImpl.java
new file mode 100644
index 0000000..6d6d387
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportRequestPBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
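+// Protobuf-backed implementation of GetJobReportRequest: caches the JobId
+// locally and merges it back into the proto on getProto().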
+public class GetJobReportRequestPBImpl extends ProtoBase<GetJobReportRequestProto> implements GetJobReportRequest {
+ GetJobReportRequestProto proto = GetJobReportRequestProto.getDefaultInstance();
+ GetJobReportRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private JobId jobId = null;
+
+
+ public GetJobReportRequestPBImpl() {
+ builder = GetJobReportRequestProto.newBuilder();
+ }
+
+ public GetJobReportRequestPBImpl(GetJobReportRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetJobReportRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.jobId != null) {
+ builder.setJobId(convertToProtoFormat(this.jobId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetJobReportRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public JobId getJobId() {
+ GetJobReportRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.jobId != null) {
+ return this.jobId;
+ }
+ if (!p.hasJobId()) {
+ return null;
+ }
+ this.jobId = convertFromProtoFormat(p.getJobId());
+ return this.jobId;
+ }
+
+ @Override
+ public void setJobId(JobId jobId) {
+ maybeInitBuilder();
+ if (jobId == null)
+ builder.clearJobId();
+ this.jobId = jobId;
+ }
+
+ private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
+ return new JobIdPBImpl(p);
+ }
+
+ private JobIdProto convertToProtoFormat(JobId t) {
+ return ((JobIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportResponsePBImpl.java
new file mode 100644
index 0000000..b00f316
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportResponsePBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobReportPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobReportProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
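+// Protobuf-backed implementation of GetJobReportResponse: caches the
+// JobReport locally and merges it back into the proto on getProto().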
+public class GetJobReportResponsePBImpl extends ProtoBase<GetJobReportResponseProto> implements GetJobReportResponse {
+ GetJobReportResponseProto proto = GetJobReportResponseProto.getDefaultInstance();
+ GetJobReportResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private JobReport jobReport = null;
+
+
+ public GetJobReportResponsePBImpl() {
+ builder = GetJobReportResponseProto.newBuilder();
+ }
+
+ public GetJobReportResponsePBImpl(GetJobReportResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetJobReportResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.jobReport != null) {
+ builder.setJobReport(convertToProtoFormat(this.jobReport));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetJobReportResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public JobReport getJobReport() {
+ GetJobReportResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.jobReport != null) {
+ return this.jobReport;
+ }
+ if (!p.hasJobReport()) {
+ return null;
+ }
+ this.jobReport = convertFromProtoFormat(p.getJobReport());
+ return this.jobReport;
+ }
+
+ @Override
+ public void setJobReport(JobReport jobReport) {
+ maybeInitBuilder();
+ if (jobReport == null)
+ builder.clearJobReport();
+ this.jobReport = jobReport;
+ }
+
+ private JobReportPBImpl convertFromProtoFormat(JobReportProto p) {
+ return new JobReportPBImpl(p);
+ }
+
+ private JobReportProto convertToProtoFormat(JobReport t) {
+ return ((JobReportPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsRequestPBImpl.java
new file mode 100644
index 0000000..37298fb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsRequestPBImpl.java
@@ -0,0 +1,115 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
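+// Protobuf-backed implementation of GetTaskAttemptCompletionEventsRequest:
+// caches the JobId; fromEventId and maxEvents read and write the proto directly.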
+public class GetTaskAttemptCompletionEventsRequestPBImpl extends ProtoBase<GetTaskAttemptCompletionEventsRequestProto> implements GetTaskAttemptCompletionEventsRequest {
+ GetTaskAttemptCompletionEventsRequestProto proto = GetTaskAttemptCompletionEventsRequestProto.getDefaultInstance();
+ GetTaskAttemptCompletionEventsRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private JobId jobId = null;
+
+
+ public GetTaskAttemptCompletionEventsRequestPBImpl() {
+ builder = GetTaskAttemptCompletionEventsRequestProto.newBuilder();
+ }
+
+ public GetTaskAttemptCompletionEventsRequestPBImpl(GetTaskAttemptCompletionEventsRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetTaskAttemptCompletionEventsRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.jobId != null) {
+ builder.setJobId(convertToProtoFormat(this.jobId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetTaskAttemptCompletionEventsRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public JobId getJobId() {
+ GetTaskAttemptCompletionEventsRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.jobId != null) {
+ return this.jobId;
+ }
+ if (!p.hasJobId()) {
+ return null;
+ }
+ this.jobId = convertFromProtoFormat(p.getJobId());
+ return this.jobId;
+ }
+
+ @Override
+ public void setJobId(JobId jobId) {
+ maybeInitBuilder();
+ if (jobId == null)
+ builder.clearJobId();
+ this.jobId = jobId;
+ }
+ @Override
+ public int getFromEventId() {
+ GetTaskAttemptCompletionEventsRequestProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getFromEventId();
+ }
+
+ @Override
+ public void setFromEventId(int fromEventId) {
+ maybeInitBuilder();
+    builder.setFromEventId(fromEventId);
+ }
+ @Override
+ public int getMaxEvents() {
+ GetTaskAttemptCompletionEventsRequestProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getMaxEvents();
+ }
+
+ @Override
+ public void setMaxEvents(int maxEvents) {
+ maybeInitBuilder();
+    builder.setMaxEvents(maxEvents);
+ }
+
+ private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
+ return new JobIdPBImpl(p);
+ }
+
+ private JobIdProto convertToProtoFormat(JobId t) {
+ return ((JobIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsResponsePBImpl.java
new file mode 100644
index 0000000..da4e8ad
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsResponsePBImpl.java
@@ -0,0 +1,164 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptCompletionEventPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
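+// Protobuf-backed implementation of GetTaskAttemptCompletionEventsResponse:
+// materializes the repeated completion events into a local list on first read.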
+public class GetTaskAttemptCompletionEventsResponsePBImpl extends ProtoBase<GetTaskAttemptCompletionEventsResponseProto> implements GetTaskAttemptCompletionEventsResponse {
+ GetTaskAttemptCompletionEventsResponseProto proto = GetTaskAttemptCompletionEventsResponseProto.getDefaultInstance();
+ GetTaskAttemptCompletionEventsResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private List<TaskAttemptCompletionEvent> completionEvents = null;
+
+
+ public GetTaskAttemptCompletionEventsResponsePBImpl() {
+ builder = GetTaskAttemptCompletionEventsResponseProto.newBuilder();
+ }
+
+ public GetTaskAttemptCompletionEventsResponsePBImpl(GetTaskAttemptCompletionEventsResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetTaskAttemptCompletionEventsResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.completionEvents != null) {
+ addCompletionEventsToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetTaskAttemptCompletionEventsResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public List<TaskAttemptCompletionEvent> getCompletionEventList() {
+ initCompletionEvents();
+ return this.completionEvents;
+ }
+ @Override
+ public TaskAttemptCompletionEvent getCompletionEvent(int index) {
+ initCompletionEvents();
+ return this.completionEvents.get(index);
+ }
+ @Override
+ public int getCompletionEventCount() {
+ initCompletionEvents();
+ return this.completionEvents.size();
+ }
+
+ private void initCompletionEvents() {
+ if (this.completionEvents != null) {
+ return;
+ }
+ GetTaskAttemptCompletionEventsResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<TaskAttemptCompletionEventProto> list = p.getCompletionEventsList();
+ this.completionEvents = new ArrayList<TaskAttemptCompletionEvent>();
+
+ for (TaskAttemptCompletionEventProto c : list) {
+ this.completionEvents.add(convertFromProtoFormat(c));
+ }
+ }
+
+ @Override
+ public void addAllCompletionEvents(final List<TaskAttemptCompletionEvent> completionEvents) {
+ if (completionEvents == null)
+ return;
+ initCompletionEvents();
+ this.completionEvents.addAll(completionEvents);
+ }
+
+ private void addCompletionEventsToProto() {
+ maybeInitBuilder();
+ builder.clearCompletionEvents();
+ if (completionEvents == null)
+ return;
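+    // Wrap the cached events in an Iterable that converts each one to its
+    // proto form lazily as the builder consumes it.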
+ Iterable<TaskAttemptCompletionEventProto> iterable = new Iterable<TaskAttemptCompletionEventProto>() {
+ @Override
+ public Iterator<TaskAttemptCompletionEventProto> iterator() {
+ return new Iterator<TaskAttemptCompletionEventProto>() {
+
+ Iterator<TaskAttemptCompletionEvent> iter = completionEvents.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public TaskAttemptCompletionEventProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllCompletionEvents(iterable);
+ }
+ @Override
+  public void addCompletionEvent(TaskAttemptCompletionEvent completionEvent) {
+    initCompletionEvents();
+    this.completionEvents.add(completionEvent);
+ }
+ @Override
+ public void removeCompletionEvent(int index) {
+ initCompletionEvents();
+ this.completionEvents.remove(index);
+ }
+ @Override
+ public void clearCompletionEvents() {
+ initCompletionEvents();
+ this.completionEvents.clear();
+ }
+
+ private TaskAttemptCompletionEventPBImpl convertFromProtoFormat(TaskAttemptCompletionEventProto p) {
+ return new TaskAttemptCompletionEventPBImpl(p);
+ }
+
+ private TaskAttemptCompletionEventProto convertToProtoFormat(TaskAttemptCompletionEvent t) {
+ return ((TaskAttemptCompletionEventPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportRequestPBImpl.java
new file mode 100644
index 0000000..5516c32
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportRequestPBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
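+// Protobuf-backed implementation of GetTaskAttemptReportRequest: caches the
+// TaskAttemptId locally and merges it back into the proto on getProto().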
+public class GetTaskAttemptReportRequestPBImpl extends ProtoBase<GetTaskAttemptReportRequestProto> implements GetTaskAttemptReportRequest {
+ GetTaskAttemptReportRequestProto proto = GetTaskAttemptReportRequestProto.getDefaultInstance();
+ GetTaskAttemptReportRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskAttemptId taskAttemptId = null;
+
+
+ public GetTaskAttemptReportRequestPBImpl() {
+ builder = GetTaskAttemptReportRequestProto.newBuilder();
+ }
+
+ public GetTaskAttemptReportRequestPBImpl(GetTaskAttemptReportRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetTaskAttemptReportRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskAttemptId != null) {
+ builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetTaskAttemptReportRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public TaskAttemptId getTaskAttemptId() {
+ GetTaskAttemptReportRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskAttemptId != null) {
+ return this.taskAttemptId;
+ }
+ if (!p.hasTaskAttemptId()) {
+ return null;
+ }
+ this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
+ return this.taskAttemptId;
+ }
+
+ @Override
+ public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
+ maybeInitBuilder();
+ if (taskAttemptId == null)
+ builder.clearTaskAttemptId();
+ this.taskAttemptId = taskAttemptId;
+ }
+
+ private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
+ return new TaskAttemptIdPBImpl(p);
+ }
+
+ private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
+ return ((TaskAttemptIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportResponsePBImpl.java
new file mode 100644
index 0000000..d9bb261
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportResponsePBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptReportPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptReportProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
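+// Protobuf-backed implementation of GetTaskAttemptReportResponse: caches the
+// TaskAttemptReport locally and merges it back into the proto on getProto().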
+public class GetTaskAttemptReportResponsePBImpl extends ProtoBase<GetTaskAttemptReportResponseProto> implements GetTaskAttemptReportResponse {
+ GetTaskAttemptReportResponseProto proto = GetTaskAttemptReportResponseProto.getDefaultInstance();
+ GetTaskAttemptReportResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskAttemptReport taskAttemptReport = null;
+
+
+ public GetTaskAttemptReportResponsePBImpl() {
+ builder = GetTaskAttemptReportResponseProto.newBuilder();
+ }
+
+ public GetTaskAttemptReportResponsePBImpl(GetTaskAttemptReportResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetTaskAttemptReportResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskAttemptReport != null) {
+ builder.setTaskAttemptReport(convertToProtoFormat(this.taskAttemptReport));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetTaskAttemptReportResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public TaskAttemptReport getTaskAttemptReport() {
+ GetTaskAttemptReportResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskAttemptReport != null) {
+ return this.taskAttemptReport;
+ }
+ if (!p.hasTaskAttemptReport()) {
+ return null;
+ }
+ this.taskAttemptReport = convertFromProtoFormat(p.getTaskAttemptReport());
+ return this.taskAttemptReport;
+ }
+
+ @Override
+ public void setTaskAttemptReport(TaskAttemptReport taskAttemptReport) {
+ maybeInitBuilder();
+ if (taskAttemptReport == null)
+ builder.clearTaskAttemptReport();
+ this.taskAttemptReport = taskAttemptReport;
+ }
+
+ private TaskAttemptReportPBImpl convertFromProtoFormat(TaskAttemptReportProto p) {
+ return new TaskAttemptReportPBImpl(p);
+ }
+
+ private TaskAttemptReportProto convertToProtoFormat(TaskAttemptReport t) {
+ return ((TaskAttemptReportPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportRequestPBImpl.java
new file mode 100644
index 0000000..2c52d6b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportRequestPBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
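+// Protobuf-backed implementation of GetTaskReportRequest: caches the TaskId
+// locally and merges it back into the proto on getProto().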
+public class GetTaskReportRequestPBImpl extends ProtoBase<GetTaskReportRequestProto> implements GetTaskReportRequest {
+ GetTaskReportRequestProto proto = GetTaskReportRequestProto.getDefaultInstance();
+ GetTaskReportRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskId taskId = null;
+
+
+ public GetTaskReportRequestPBImpl() {
+ builder = GetTaskReportRequestProto.newBuilder();
+ }
+
+ public GetTaskReportRequestPBImpl(GetTaskReportRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetTaskReportRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskId != null) {
+ builder.setTaskId(convertToProtoFormat(this.taskId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetTaskReportRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public TaskId getTaskId() {
+ GetTaskReportRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskId != null) {
+ return this.taskId;
+ }
+ if (!p.hasTaskId()) {
+ return null;
+ }
+ this.taskId = convertFromProtoFormat(p.getTaskId());
+ return this.taskId;
+ }
+
+ @Override
+ public void setTaskId(TaskId taskId) {
+ maybeInitBuilder();
+ if (taskId == null)
+ builder.clearTaskId();
+ this.taskId = taskId;
+ }
+
+ private TaskIdPBImpl convertFromProtoFormat(TaskIdProto p) {
+ return new TaskIdPBImpl(p);
+ }
+
+ private TaskIdProto convertToProtoFormat(TaskId t) {
+ return ((TaskIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportResponsePBImpl.java
new file mode 100644
index 0000000..454976e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportResponsePBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskReportPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskReportProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
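+// Protobuf-backed implementation of GetTaskReportResponse: caches the
+// TaskReport locally and merges it back into the proto on getProto().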
+public class GetTaskReportResponsePBImpl extends ProtoBase<GetTaskReportResponseProto> implements GetTaskReportResponse {
+ GetTaskReportResponseProto proto = GetTaskReportResponseProto.getDefaultInstance();
+ GetTaskReportResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskReport taskReport = null;
+
+
+ public GetTaskReportResponsePBImpl() {
+ builder = GetTaskReportResponseProto.newBuilder();
+ }
+
+ public GetTaskReportResponsePBImpl(GetTaskReportResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetTaskReportResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskReport != null) {
+ builder.setTaskReport(convertToProtoFormat(this.taskReport));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetTaskReportResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public TaskReport getTaskReport() {
+ GetTaskReportResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskReport != null) {
+ return this.taskReport;
+ }
+ if (!p.hasTaskReport()) {
+ return null;
+ }
+ this.taskReport = convertFromProtoFormat(p.getTaskReport());
+ return this.taskReport;
+ }
+
+ @Override
+ public void setTaskReport(TaskReport taskReport) {
+ maybeInitBuilder();
+ if (taskReport == null)
+ builder.clearTaskReport();
+ this.taskReport = taskReport;
+ }
+
+ private TaskReportPBImpl convertFromProtoFormat(TaskReportProto p) {
+ return new TaskReportPBImpl(p);
+ }
+
+ private TaskReportProto convertToProtoFormat(TaskReport t) {
+ return ((TaskReportPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsRequestPBImpl.java
new file mode 100644
index 0000000..9deb5aa
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsRequestPBImpl.java
@@ -0,0 +1,122 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskTypeProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProtoOrBuilder;
+import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
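+// Protobuf-backed implementation of GetTaskReportsRequest: caches the JobId;
+// the TaskType is converted via MRProtoUtils and stored only in the proto.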
+public class GetTaskReportsRequestPBImpl extends ProtoBase<GetTaskReportsRequestProto> implements GetTaskReportsRequest {
+ GetTaskReportsRequestProto proto = GetTaskReportsRequestProto.getDefaultInstance();
+ GetTaskReportsRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private JobId jobId = null;
+
+
+ public GetTaskReportsRequestPBImpl() {
+ builder = GetTaskReportsRequestProto.newBuilder();
+ }
+
+ public GetTaskReportsRequestPBImpl(GetTaskReportsRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetTaskReportsRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.jobId != null) {
+ builder.setJobId(convertToProtoFormat(this.jobId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetTaskReportsRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public JobId getJobId() {
+ GetTaskReportsRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.jobId != null) {
+ return this.jobId;
+ }
+ if (!p.hasJobId()) {
+ return null;
+ }
+ this.jobId = convertFromProtoFormat(p.getJobId());
+ return this.jobId;
+ }
+
+ @Override
+ public void setJobId(JobId jobId) {
+ maybeInitBuilder();
+ if (jobId == null)
+ builder.clearJobId();
+ this.jobId = jobId;
+ }
+ @Override
+ public TaskType getTaskType() {
+ GetTaskReportsRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasTaskType()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getTaskType());
+ }
+
+ @Override
+ public void setTaskType(TaskType taskType) {
+ maybeInitBuilder();
+ if (taskType == null) {
+ builder.clearTaskType();
+ return;
+ }
+ builder.setTaskType(convertToProtoFormat(taskType));
+ }
+
+ private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
+ return new JobIdPBImpl(p);
+ }
+
+ private JobIdProto convertToProtoFormat(JobId t) {
+ return ((JobIdPBImpl)t).getProto();
+ }
+
+ private TaskTypeProto convertToProtoFormat(TaskType e) {
+ return MRProtoUtils.convertToProtoFormat(e);
+ }
+
+ private TaskType convertFromProtoFormat(TaskTypeProto e) {
+ return MRProtoUtils.convertFromProtoFormat(e);
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsResponsePBImpl.java
new file mode 100644
index 0000000..3c23641
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsResponsePBImpl.java
@@ -0,0 +1,164 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskReportPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskReportProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
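+// Protobuf-backed implementation of GetTaskReportsResponse: materializes the
+// repeated task reports into a local list on first read.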
+public class GetTaskReportsResponsePBImpl extends ProtoBase<GetTaskReportsResponseProto> implements GetTaskReportsResponse {
+ GetTaskReportsResponseProto proto = GetTaskReportsResponseProto.getDefaultInstance();
+ GetTaskReportsResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private List<TaskReport> taskReports = null;
+
+
+ public GetTaskReportsResponsePBImpl() {
+ builder = GetTaskReportsResponseProto.newBuilder();
+ }
+
+ public GetTaskReportsResponsePBImpl(GetTaskReportsResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetTaskReportsResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskReports != null) {
+ addTaskReportsToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetTaskReportsResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public List<TaskReport> getTaskReportList() {
+ initTaskReports();
+ return this.taskReports;
+ }
+ @Override
+ public TaskReport getTaskReport(int index) {
+ initTaskReports();
+ return this.taskReports.get(index);
+ }
+ @Override
+ public int getTaskReportCount() {
+ initTaskReports();
+ return this.taskReports.size();
+ }
+
+ private void initTaskReports() {
+ if (this.taskReports != null) {
+ return;
+ }
+ GetTaskReportsResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<TaskReportProto> list = p.getTaskReportsList();
+ this.taskReports = new ArrayList<TaskReport>();
+
+ for (TaskReportProto c : list) {
+ this.taskReports.add(convertFromProtoFormat(c));
+ }
+ }
+
+ @Override
+ public void addAllTaskReports(final List<TaskReport> taskReports) {
+ if (taskReports == null)
+ return;
+ initTaskReports();
+ this.taskReports.addAll(taskReports);
+ }
+
+ private void addTaskReportsToProto() {
+ maybeInitBuilder();
+ builder.clearTaskReports();
+ if (taskReports == null)
+ return;
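+    // Wrap the cached reports in an Iterable that converts each one to its
+    // proto form lazily as the builder consumes it.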
+ Iterable<TaskReportProto> iterable = new Iterable<TaskReportProto>() {
+ @Override
+ public Iterator<TaskReportProto> iterator() {
+ return new Iterator<TaskReportProto>() {
+
+ Iterator<TaskReport> iter = taskReports.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public TaskReportProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllTaskReports(iterable);
+ }
+ @Override
+  public void addTaskReport(TaskReport taskReport) {
+    initTaskReports();
+    this.taskReports.add(taskReport);
+ }
+ @Override
+ public void removeTaskReport(int index) {
+ initTaskReports();
+ this.taskReports.remove(index);
+ }
+ @Override
+ public void clearTaskReports() {
+ initTaskReports();
+ this.taskReports.clear();
+ }
+
+ private TaskReportPBImpl convertFromProtoFormat(TaskReportProto p) {
+ return new TaskReportPBImpl(p);
+ }
+
+ private TaskReportProto convertToProtoFormat(TaskReport t) {
+ return ((TaskReportPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobRequestPBImpl.java
new file mode 100644
index 0000000..bee4b45
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobRequestPBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
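+// Protobuf-backed implementation of KillJobRequest: caches the JobId locally
+// and merges it back into the proto on getProto().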
+public class KillJobRequestPBImpl extends ProtoBase<KillJobRequestProto> implements KillJobRequest {
+ KillJobRequestProto proto = KillJobRequestProto.getDefaultInstance();
+ KillJobRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private JobId jobId = null;
+
+
+ public KillJobRequestPBImpl() {
+ builder = KillJobRequestProto.newBuilder();
+ }
+
+ public KillJobRequestPBImpl(KillJobRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public KillJobRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.jobId != null) {
+ builder.setJobId(convertToProtoFormat(this.jobId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = KillJobRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public JobId getJobId() {
+ KillJobRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.jobId != null) {
+ return this.jobId;
+ }
+ if (!p.hasJobId()) {
+ return null;
+ }
+ this.jobId = convertFromProtoFormat(p.getJobId());
+ return this.jobId;
+ }
+
+ @Override
+ public void setJobId(JobId jobId) {
+ maybeInitBuilder();
+ if (jobId == null)
+ builder.clearJobId();
+ this.jobId = jobId;
+ }
+
+ private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
+ return new JobIdPBImpl(p);
+ }
+
+ private JobIdProto convertToProtoFormat(JobId t) {
+ return ((JobIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobResponsePBImpl.java
new file mode 100644
index 0000000..fd8f549
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobResponsePBImpl.java
@@ -0,0 +1,43 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
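+// Protobuf-backed implementation of KillJobResponse. The record exposes no
+// fields, so only the raw proto round-trips.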
+public class KillJobResponsePBImpl extends ProtoBase<KillJobResponseProto> implements KillJobResponse {
+ KillJobResponseProto proto = KillJobResponseProto.getDefaultInstance();
+ KillJobResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public KillJobResponsePBImpl() {
+ builder = KillJobResponseProto.newBuilder();
+ }
+
+ public KillJobResponsePBImpl(KillJobResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public KillJobResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = KillJobResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptRequestPBImpl.java
new file mode 100644
index 0000000..a91eca0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptRequestPBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
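+// Protobuf-backed implementation of KillTaskAttemptRequest: caches the
+// TaskAttemptId locally and merges it back into the proto on getProto().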
+public class KillTaskAttemptRequestPBImpl extends ProtoBase<KillTaskAttemptRequestProto> implements KillTaskAttemptRequest {
+ KillTaskAttemptRequestProto proto = KillTaskAttemptRequestProto.getDefaultInstance();
+ KillTaskAttemptRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskAttemptId taskAttemptId = null;
+
+
+ public KillTaskAttemptRequestPBImpl() {
+ builder = KillTaskAttemptRequestProto.newBuilder();
+ }
+
+ public KillTaskAttemptRequestPBImpl(KillTaskAttemptRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public KillTaskAttemptRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskAttemptId != null) {
+ builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = KillTaskAttemptRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public TaskAttemptId getTaskAttemptId() {
+ KillTaskAttemptRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskAttemptId != null) {
+ return this.taskAttemptId;
+ }
+ if (!p.hasTaskAttemptId()) {
+ return null;
+ }
+ this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
+ return this.taskAttemptId;
+ }
+
+ @Override
+ public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
+ maybeInitBuilder();
+ if (taskAttemptId == null)
+ builder.clearTaskAttemptId();
+ this.taskAttemptId = taskAttemptId;
+ }
+
+ private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
+ return new TaskAttemptIdPBImpl(p);
+ }
+
+ private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
+ return ((TaskAttemptIdPBImpl)t).getProto();
+ }
+
+
+
+}
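
For reference, a hypothetical caller-side sketch (not part of this patch) that builds a KillTaskAttemptRequest and round-trips it through its wire form; it assumes only the record classes introduced above and their generated protos.

import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;

public class KillTaskAttemptExample {
  public static void main(String[] args) {
    TaskAttemptId attemptId = new TaskAttemptIdPBImpl();
    attemptId.setId(0); // a fuller example would also set the enclosing TaskId

    KillTaskAttemptRequest request = new KillTaskAttemptRequestPBImpl();
    request.setTaskAttemptId(attemptId);

    // Serialize to the proto form, then rebuild an equivalent record from it.
    KillTaskAttemptRequestProto wire =
        ((KillTaskAttemptRequestPBImpl) request).getProto();
    KillTaskAttemptRequest copy = new KillTaskAttemptRequestPBImpl(wire);
    System.out.println(copy.getTaskAttemptId().getId()); // prints: 0
  }
}
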
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptResponsePBImpl.java
new file mode 100644
index 0000000..bf0c57f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptResponsePBImpl.java
@@ -0,0 +1,41 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class KillTaskAttemptResponsePBImpl extends ProtoBase<KillTaskAttemptResponseProto> implements KillTaskAttemptResponse {
+ KillTaskAttemptResponseProto proto = KillTaskAttemptResponseProto.getDefaultInstance();
+ KillTaskAttemptResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public KillTaskAttemptResponsePBImpl() {
+ builder = KillTaskAttemptResponseProto.newBuilder();
+ }
+
+ public KillTaskAttemptResponsePBImpl(KillTaskAttemptResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public KillTaskAttemptResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = KillTaskAttemptResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskRequestPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskRequestPBImpl.java
new file mode 100644
index 0000000..f53cc35
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskRequestPBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class KillTaskRequestPBImpl extends ProtoBase<KillTaskRequestProto> implements KillTaskRequest {
+ KillTaskRequestProto proto = KillTaskRequestProto.getDefaultInstance();
+ KillTaskRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskId taskId = null;
+
+
+ public KillTaskRequestPBImpl() {
+ builder = KillTaskRequestProto.newBuilder();
+ }
+
+ public KillTaskRequestPBImpl(KillTaskRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public KillTaskRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskId != null) {
+ builder.setTaskId(convertToProtoFormat(this.taskId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = KillTaskRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public TaskId getTaskId() {
+ KillTaskRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskId != null) {
+ return this.taskId;
+ }
+ if (!p.hasTaskId()) {
+ return null;
+ }
+ this.taskId = convertFromProtoFormat(p.getTaskId());
+ return this.taskId;
+ }
+
+ @Override
+ public void setTaskId(TaskId taskId) {
+ maybeInitBuilder();
+ if (taskId == null)
+ builder.clearTaskId();
+ this.taskId = taskId;
+ }
+
+ private TaskIdPBImpl convertFromProtoFormat(TaskIdProto p) {
+ return new TaskIdPBImpl(p);
+ }
+
+ private TaskIdProto convertToProtoFormat(TaskId t) {
+ return ((TaskIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskResponsePBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskResponsePBImpl.java
new file mode 100644
index 0000000..2d9a349
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskResponsePBImpl.java
@@ -0,0 +1,41 @@
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class KillTaskResponsePBImpl extends ProtoBase<KillTaskResponseProto> implements KillTaskResponse {
+ KillTaskResponseProto proto = KillTaskResponseProto.getDefaultInstance();
+ KillTaskResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public KillTaskResponsePBImpl() {
+ builder = KillTaskResponseProto.newBuilder();
+ }
+
+ public KillTaskResponsePBImpl(KillTaskResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public KillTaskResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = KillTaskResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counter.java
new file mode 100644
index 0000000..fa7188f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counter.java
@@ -0,0 +1,11 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public interface Counter {
+ public abstract String getName();
+ public abstract String getDisplayName();
+ public abstract long getValue();
+
+ public abstract void setName(String name);
+ public abstract void setDisplayName(String displayName);
+ public abstract void setValue(long value);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/CounterGroup.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/CounterGroup.java
new file mode 100644
index 0000000..b013ea2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/CounterGroup.java
@@ -0,0 +1,19 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+import java.util.Map;
+
+public interface CounterGroup {
+ public abstract String getName();
+ public abstract String getDisplayName();
+
+ public abstract Map<String, Counter> getAllCounters();
+ public abstract Counter getCounter(String key);
+
+ public abstract void setName(String name);
+ public abstract void setDisplayName(String displayName);
+
+ public abstract void addAllCounters(Map<String, Counter> counters);
+ public abstract void setCounter(String key, Counter value);
+ public abstract void removeCounter(String key);
+ public abstract void clearCounters();
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counters.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counters.java
new file mode 100644
index 0000000..23c523a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counters.java
@@ -0,0 +1,16 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+import java.util.Map;
+
+public interface Counters {
+ public abstract Map<String, CounterGroup> getAllCounterGroups();
+ public abstract CounterGroup getCounterGroup(String key);
+ public abstract Counter getCounter(Enum<?> key);
+
+ public abstract void addAllCounterGroups(Map<String, CounterGroup> counterGroups);
+ public abstract void setCounterGroup(String key, CounterGroup value);
+ public abstract void removeCounterGroup(String key);
+ public abstract void clearCounterGroups();
+
+ public abstract void incrCounter(Enum<?> key, long amount);
+}
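
The Counters/CounterGroup/Counter records key groups by the fully qualified name of the counter enum's declaring class, and counters within a group by the enum constant's name. An illustrative sketch, assuming the CountersPBImpl implementation added later in this patch:

import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.CountersPBImpl;

public class CountersExample {
  enum MyCounter { RECORDS_READ }

  public static void main(String[] args) {
    Counters counters = new CountersPBImpl();
    counters.incrCounter(MyCounter.RECORDS_READ, 5);
    counters.incrCounter(MyCounter.RECORDS_READ, 2);
    // The group name is the enum's declaring class name; the counter name
    // is the enum constant, so both increments hit the same counter.
    System.out.println(
        counters.getCounter(MyCounter.RECORDS_READ).getValue()); // prints: 7
  }
}
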
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobId.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobId.java
new file mode 100644
index 0000000..516f9a8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobId.java
@@ -0,0 +1,12 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public interface JobId {
+ public abstract ApplicationId getAppId();
+ public abstract int getId();
+
+ public abstract void setAppId(ApplicationId appId);
+ public abstract void setId(int id);
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
new file mode 100644
index 0000000..186d2ef
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
@@ -0,0 +1,21 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public interface JobReport {
+ public abstract JobId getJobId();
+ public abstract JobState getJobState();
+ public abstract float getMapProgress();
+ public abstract float getReduceProgress();
+ public abstract float getCleanupProgress();
+ public abstract float getSetupProgress();
+ public abstract long getStartTime();
+ public abstract long getFinishTime();
+
+ public abstract void setJobId(JobId jobId);
+ public abstract void setJobState(JobState jobState);
+ public abstract void setMapProgress(float progress);
+ public abstract void setReduceProgress(float progress);
+ public abstract void setCleanupProgress(float progress);
+ public abstract void setSetupProgress(float progress);
+ public abstract void setStartTime(long startTime);
+ public abstract void setFinishTime(long finishTime);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobState.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobState.java
new file mode 100644
index 0000000..a8151c9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobState.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public enum JobState {
+ NEW,
+ INITED,
+ RUNNING,
+ SUCCEEDED,
+ FAILED,
+ KILL_WAIT,
+ KILLED,
+ ERROR
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Phase.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Phase.java
new file mode 100644
index 0000000..f937fbd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Phase.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public enum Phase {
+ STARTING, MAP, SHUFFLE, SORT, REDUCE, CLEANUP
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEvent.java
new file mode 100644
index 0000000..df0de75
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEvent.java
@@ -0,0 +1,15 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public interface TaskAttemptCompletionEvent {
+ public abstract TaskAttemptId getAttemptId();
+ public abstract TaskAttemptCompletionEventStatus getStatus();
+ public abstract String getMapOutputServerAddress();
+ public abstract int getAttemptRunTime();
+ public abstract int getEventId();
+
+ public abstract void setAttemptId(TaskAttemptId taskAttemptId);
+ public abstract void setStatus(TaskAttemptCompletionEventStatus status);
+ public abstract void setMapOutputServerAddress(String address);
+ public abstract void setAttemptRunTime(int runTime);
+ public abstract void setEventId(int eventId);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEventStatus.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEventStatus.java
new file mode 100644
index 0000000..10992af
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEventStatus.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public enum TaskAttemptCompletionEventStatus {
+ FAILED,
+ KILLED,
+ SUCCEEDED,
+ OBSOLETE,
+ TIPFAILED
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptId.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptId.java
new file mode 100644
index 0000000..d5943a1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptId.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public interface TaskAttemptId {
+ public abstract TaskId getTaskId();
+ public abstract int getId();
+
+ public abstract void setTaskId(TaskId taskId);
+ public abstract void setId(int id);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
new file mode 100644
index 0000000..354f128
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
@@ -0,0 +1,24 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public interface TaskAttemptReport {
+ public abstract TaskAttemptId getTaskAttemptId();
+ public abstract TaskAttemptState getTaskAttemptState();
+ public abstract float getProgress();
+ public abstract long getStartTime();
+ public abstract long getFinishTime();
+ public abstract Counters getCounters();
+ public abstract String getDiagnosticInfo();
+ public abstract String getStateString();
+ public abstract Phase getPhase();
+
+ public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
+ public abstract void setTaskAttemptState(TaskAttemptState taskAttemptState);
+ public abstract void setProgress(float progress);
+ public abstract void setStartTime(long startTime);
+ public abstract void setFinishTime(long finishTime);
+ public abstract void setCounters(Counters counters);
+ public abstract void setDiagnosticInfo(String diagnosticInfo);
+ public abstract void setStateString(String stateString);
+ public abstract void setPhase(Phase phase);
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptState.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptState.java
new file mode 100644
index 0000000..829dbb5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptState.java
@@ -0,0 +1,17 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public enum TaskAttemptState {
+ NEW,
+ UNASSIGNED,
+ ASSIGNED,
+ RUNNING,
+ COMMIT_PENDING,
+ SUCCESS_CONTAINER_CLEANUP,
+ SUCCEEDED,
+ FAIL_CONTAINER_CLEANUP,
+ FAIL_TASK_CLEANUP,
+ FAILED,
+ KILL_CONTAINER_CLEANUP,
+ KILL_TASK_CLEANUP,
+ KILLED
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskId.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskId.java
new file mode 100644
index 0000000..6ec3c9f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskId.java
@@ -0,0 +1,11 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public interface TaskId {
+ public abstract JobId getJobId();
+ public abstract TaskType getTaskType();
+ public abstract int getId();
+
+ public abstract void setJobId(JobId jobId);
+ public abstract void setTaskType(TaskType taskType);
+ public abstract void setId(int id);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java
new file mode 100644
index 0000000..150970e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java
@@ -0,0 +1,41 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+import java.util.List;
+
+public interface TaskReport {
+ public abstract TaskId getTaskId();
+ public abstract TaskState getTaskState();
+ public abstract float getProgress();
+ public abstract long getStartTime();
+ public abstract long getFinishTime();
+ public abstract Counters getCounters();
+
+ public abstract List<TaskAttemptId> getRunningAttemptsList();
+ public abstract TaskAttemptId getRunningAttempt(int index);
+ public abstract int getRunningAttemptsCount();
+
+ public abstract TaskAttemptId getSuccessfulAttempt();
+
+ public abstract List<String> getDiagnosticsList();
+ public abstract String getDiagnostics(int index);
+ public abstract int getDiagnosticsCount();
+
+
+ public abstract void setTaskId(TaskId taskId);
+ public abstract void setTaskState(TaskState taskState);
+ public abstract void setProgress(float progress);
+ public abstract void setStartTime(long startTime);
+ public abstract void setFinishTime(long finishTime);
+ public abstract void setCounters(Counters counters);
+
+ public abstract void addAllRunningAttempts(List<TaskAttemptId> taskAttempts);
+ public abstract void addRunningAttempt(TaskAttemptId taskAttempt);
+ public abstract void removeRunningAttempt(int index);
+ public abstract void clearRunningAttempts();
+
+  public abstract void setSuccessfulAttempt(TaskAttemptId taskAttempt);

+ public abstract void addAllDiagnostics(List<String> diagnostics);
+ public abstract void addDiagnostics(String diagnostics);
+ public abstract void removeDiagnostics(int index);
+ public abstract void clearDiagnostics();
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskState.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskState.java
new file mode 100644
index 0000000..e20ef28
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskState.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public enum TaskState {
+ NEW, SCHEDULED, RUNNING, SUCCEEDED, FAILED, KILL_WAIT, KILLED
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskType.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskType.java
new file mode 100644
index 0000000..ced82e1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskType.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+public enum TaskType {
+ MAP, REDUCE
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterGroupPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterGroupPBImpl.java
new file mode 100644
index 0000000..05105c9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterGroupPBImpl.java
@@ -0,0 +1,189 @@
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterGroupProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterGroupProtoOrBuilder;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.StringCounterMapProto;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class CounterGroupPBImpl extends ProtoBase<CounterGroupProto> implements CounterGroup {
+ CounterGroupProto proto = CounterGroupProto.getDefaultInstance();
+ CounterGroupProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private Map<String, Counter> counters = null;
+
+
+ public CounterGroupPBImpl() {
+ builder = CounterGroupProto.newBuilder();
+ }
+
+ public CounterGroupPBImpl(CounterGroupProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public CounterGroupProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.counters != null) {
+      addCountersToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = CounterGroupProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public String getName() {
+ CounterGroupProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasName()) {
+ return null;
+ }
+ return (p.getName());
+ }
+
+ @Override
+ public void setName(String name) {
+ maybeInitBuilder();
+ if (name == null) {
+ builder.clearName();
+ return;
+ }
+ builder.setName((name));
+ }
+ @Override
+ public String getDisplayName() {
+ CounterGroupProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasDisplayName()) {
+ return null;
+ }
+ return (p.getDisplayName());
+ }
+
+ @Override
+ public void setDisplayName(String displayName) {
+ maybeInitBuilder();
+ if (displayName == null) {
+ builder.clearDisplayName();
+ return;
+ }
+ builder.setDisplayName((displayName));
+ }
+ @Override
+ public Map<String, Counter> getAllCounters() {
+ initCounters();
+ return this.counters;
+ }
+ @Override
+ public Counter getCounter(String key) {
+ initCounters();
+ return this.counters.get(key);
+ }
+
+ private void initCounters() {
+ if (this.counters != null) {
+ return;
+ }
+ CounterGroupProtoOrBuilder p = viaProto ? proto : builder;
+ List<StringCounterMapProto> list = p.getCountersList();
+ this.counters = new HashMap<String, Counter>();
+
+ for (StringCounterMapProto c : list) {
+ this.counters.put(c.getKey(), convertFromProtoFormat(c.getValue()));
+ }
+ }
+
+ @Override
+ public void addAllCounters(final Map<String, Counter> counters) {
+ if (counters == null)
+ return;
+ initCounters();
+ this.counters.putAll(counters);
+ }
+
+  private void addCountersToProto() {
+ maybeInitBuilder();
+ builder.clearCounters();
+ if (counters == null)
+ return;
+ Iterable<StringCounterMapProto> iterable = new Iterable<StringCounterMapProto>() {
+
+ @Override
+ public Iterator<StringCounterMapProto> iterator() {
+ return new Iterator<StringCounterMapProto>() {
+
+ Iterator<String> keyIter = counters.keySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public StringCounterMapProto next() {
+ String key = keyIter.next();
+ return StringCounterMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(counters.get(key))).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return keyIter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllCounters(iterable);
+ }
+ @Override
+ public void setCounter(String key, Counter val) {
+ initCounters();
+ this.counters.put(key, val);
+ }
+ @Override
+ public void removeCounter(String key) {
+ initCounters();
+ this.counters.remove(key);
+ }
+ @Override
+ public void clearCounters() {
+ initCounters();
+ this.counters.clear();
+ }
+
+ private CounterPBImpl convertFromProtoFormat(CounterProto p) {
+ return new CounterPBImpl(p);
+ }
+
+ private CounterProto convertToProtoFormat(Counter t) {
+ return ((CounterPBImpl)t).getProto();
+ }
+}
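
addCountersToProto above converts the local counter map into the repeated proto field through a lazy Iterable, so entry messages are built one at a time as protobuf consumes them rather than being materialized in an intermediate list. The same pattern in isolation, with illustrative generic types that are not part of the patch:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

// Adapts a Map's entries to per-entry messages, building each on demand.
final class LazyEntries<K, V, E> implements Iterable<E> {

  interface EntryFactory<K, V, E> { E make(K key, V value); }

  private final Map<K, V> map;
  private final EntryFactory<K, V, E> factory;

  LazyEntries(Map<K, V> map, EntryFactory<K, V, E> factory) {
    this.map = map;
    this.factory = factory;
  }

  @Override
  public Iterator<E> iterator() {
    final Iterator<Map.Entry<K, V>> it = map.entrySet().iterator();
    return new Iterator<E>() {
      @Override public boolean hasNext() { return it.hasNext(); }
      @Override public E next() {
        Map.Entry<K, V> e = it.next();
        return factory.make(e.getKey(), e.getValue()); // built on demand
      }
      @Override public void remove() { throw new UnsupportedOperationException(); }
    };
  }

  public static void main(String[] args) {
    Map<String, Integer> m = new HashMap<String, Integer>();
    m.put("a", 1);
    LazyEntries<String, Integer, String> entries =
        new LazyEntries<String, Integer, String>(m,
            new EntryFactory<String, Integer, String>() {
              @Override public String make(String k, Integer v) { return k + "=" + v; }
            });
    for (String e : entries) {
      System.out.println(e); // prints: a=1
    }
  }
}
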
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterPBImpl.java
new file mode 100644
index 0000000..d5f055c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterPBImpl.java
@@ -0,0 +1,89 @@
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class CounterPBImpl extends ProtoBase<CounterProto> implements Counter {
+ CounterProto proto = CounterProto.getDefaultInstance();
+ CounterProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public CounterPBImpl() {
+ builder = CounterProto.newBuilder();
+ }
+
+ public CounterPBImpl(CounterProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public CounterProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = CounterProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public String getName() {
+ CounterProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasName()) {
+ return null;
+ }
+ return (p.getName());
+ }
+
+ @Override
+ public void setName(String name) {
+ maybeInitBuilder();
+ if (name == null) {
+ builder.clearName();
+ return;
+ }
+ builder.setName((name));
+ }
+ @Override
+ public long getValue() {
+ CounterProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getValue());
+ }
+
+ @Override
+ public void setValue(long value) {
+ maybeInitBuilder();
+ builder.setValue((value));
+ }
+ @Override
+ public String getDisplayName() {
+ CounterProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasDisplayName()) {
+ return null;
+ }
+ return (p.getDisplayName());
+ }
+
+ @Override
+ public void setDisplayName(String displayName) {
+ maybeInitBuilder();
+ if (displayName == null) {
+ builder.clearDisplayName();
+ return;
+ }
+ builder.setDisplayName((displayName));
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CountersPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CountersPBImpl.java
new file mode 100644
index 0000000..66f32e0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CountersPBImpl.java
@@ -0,0 +1,179 @@
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterGroupProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProtoOrBuilder;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.StringCounterGroupMapProto;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class CountersPBImpl extends ProtoBase<CountersProto> implements Counters {
+ CountersProto proto = CountersProto.getDefaultInstance();
+ CountersProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private Map<String, CounterGroup> counterGroups = null;
+
+
+ public CountersPBImpl() {
+ builder = CountersProto.newBuilder();
+ }
+
+ public CountersPBImpl(CountersProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public CountersProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.counterGroups != null) {
+ addCounterGroupsToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = CountersProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public Map<String, CounterGroup> getAllCounterGroups() {
+ initCounterGroups();
+ return this.counterGroups;
+ }
+ @Override
+ public CounterGroup getCounterGroup(String key) {
+ initCounterGroups();
+ return this.counterGroups.get(key);
+ }
+ @Override
+ public Counter getCounter(Enum<?> key) {
+ CounterGroup group = getCounterGroup(key.getDeclaringClass().getName());
+ return group == null ? null : group.getCounter(key.name());
+ }
+
+ @Override
+ public void incrCounter(Enum<?> key, long amount) {
+ String groupName = key.getDeclaringClass().getName();
+ if (getCounterGroup(groupName) == null) {
+ CounterGroup cGrp = new CounterGroupPBImpl();
+ cGrp.setName(groupName);
+ cGrp.setDisplayName(groupName);
+ setCounterGroup(groupName, cGrp);
+ }
+ if (getCounterGroup(groupName).getCounter(key.name()) == null) {
+ Counter c = new CounterPBImpl();
+ c.setName(key.name());
+ c.setDisplayName(key.name());
+      c.setValue(0L);
+ getCounterGroup(groupName).setCounter(key.name(), c);
+ }
+ Counter counter = getCounterGroup(groupName).getCounter(key.name());
+ counter.setValue(counter.getValue() + amount);
+ }
+
+ private void initCounterGroups() {
+ if (this.counterGroups != null) {
+ return;
+ }
+ CountersProtoOrBuilder p = viaProto ? proto : builder;
+ List<StringCounterGroupMapProto> list = p.getCounterGroupsList();
+ this.counterGroups = new HashMap<String, CounterGroup>();
+
+ for (StringCounterGroupMapProto c : list) {
+ this.counterGroups.put(c.getKey(), convertFromProtoFormat(c.getValue()));
+ }
+ }
+
+ @Override
+ public void addAllCounterGroups(final Map<String, CounterGroup> counterGroups) {
+ if (counterGroups == null)
+ return;
+ initCounterGroups();
+ this.counterGroups.putAll(counterGroups);
+ }
+
+ private void addCounterGroupsToProto() {
+ maybeInitBuilder();
+ builder.clearCounterGroups();
+ if (counterGroups == null)
+ return;
+ Iterable<StringCounterGroupMapProto> iterable = new Iterable<StringCounterGroupMapProto>() {
+
+ @Override
+ public Iterator<StringCounterGroupMapProto> iterator() {
+ return new Iterator<StringCounterGroupMapProto>() {
+
+ Iterator<String> keyIter = counterGroups.keySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public StringCounterGroupMapProto next() {
+ String key = keyIter.next();
+ return StringCounterGroupMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(counterGroups.get(key))).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return keyIter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllCounterGroups(iterable);
+ }
+ @Override
+ public void setCounterGroup(String key, CounterGroup val) {
+ initCounterGroups();
+ this.counterGroups.put(key, val);
+ }
+ @Override
+ public void removeCounterGroup(String key) {
+ initCounterGroups();
+ this.counterGroups.remove(key);
+ }
+ @Override
+ public void clearCounterGroups() {
+ initCounterGroups();
+ this.counterGroups.clear();
+ }
+
+ private CounterGroupPBImpl convertFromProtoFormat(CounterGroupProto p) {
+ return new CounterGroupPBImpl(p);
+ }
+
+ private CounterGroupProto convertToProtoFormat(CounterGroup t) {
+ return ((CounterGroupPBImpl)t).getProto();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobIdPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobIdPBImpl.java
new file mode 100644
index 0000000..000a2f6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobIdPBImpl.java
@@ -0,0 +1,141 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+import java.text.NumberFormat;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+
+public class JobIdPBImpl extends ProtoBase<JobIdProto> implements JobId {
+
+ protected static final String JOB = "job";
+ protected static final char SEPARATOR = '_';
+ protected static final NumberFormat idFormat = NumberFormat.getInstance();
+ static {
+ idFormat.setGroupingUsed(false);
+ idFormat.setMinimumIntegerDigits(4);
+ }
+
+
+ JobIdProto proto = JobIdProto.getDefaultInstance();
+ JobIdProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationId applicationId = null;
+// boolean hasLocalAppId = false;
+
+
+ public JobIdPBImpl() {
+ builder = JobIdProto.newBuilder();
+ }
+
+ public JobIdPBImpl(JobIdProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public synchronized JobIdProto getProto() {
+
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private synchronized void mergeLocalToBuilder() {
+ if (this.applicationId != null && !((ApplicationIdPBImpl)this.applicationId).getProto().equals(builder.getAppId())) {
+ builder.setAppId(convertToProtoFormat(this.applicationId));
+ }
+ }
+
+ private synchronized void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private synchronized void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = JobIdProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public synchronized ApplicationId getAppId() {
+ JobIdProtoOrBuilder p = viaProto ? proto : builder;
+ if (applicationId != null) {
+ return applicationId;
+ } // Else via proto
+ if (!p.hasAppId()) {
+ return null;
+ }
+ applicationId = convertFromProtoFormat(p.getAppId());
+ return applicationId;
+ }
+
+ @Override
+ public synchronized void setAppId(ApplicationId appId) {
+ maybeInitBuilder();
+ if (appId == null) {
+ builder.clearAppId();
+ }
+ this.applicationId = appId;
+// builder.setAppId(convertToProtoFormat(appId));
+ }
+ @Override
+ public synchronized int getId() {
+ JobIdProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getId());
+ }
+
+ @Override
+ public synchronized void setId(int id) {
+ maybeInitBuilder();
+ builder.setId((id));
+ }
+
+ private synchronized ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private synchronized ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+ @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder(JOB);
+    sb.append(SEPARATOR);
+    sb.append(getAppId().getClusterTimestamp());
+    sb.append(SEPARATOR);
+    sb.append(idFormat.format(getId()));
+    return sb.toString();
+  }
+}
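
The toString above yields the familiar job id string: "job", the application's cluster timestamp, and the job id zero-padded to four digits. A standalone sketch of the formatting; the timestamp and id values are illustrative only.

import java.text.NumberFormat;

public class JobIdFormatDemo {
  public static void main(String[] args) {
    NumberFormat f = NumberFormat.getInstance();
    f.setGroupingUsed(false);
    f.setMinimumIntegerDigits(4);
    long clusterTimestamp = 1315986086000L; // illustrative timestamp
    int id = 7;
    // Mirrors JobIdPBImpl.toString(): "job" + '_' + timestamp + '_' + padded id
    System.out.println("job" + '_' + clusterTimestamp + '_' + f.format(id));
    // prints: job_1315986086000_0007
  }
}
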
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
new file mode 100644
index 0000000..dc9fdd4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
@@ -0,0 +1,185 @@
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobReportProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobReportProtoOrBuilder;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobStateProto;
+import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobReport {
+ JobReportProto proto = JobReportProto.getDefaultInstance();
+ JobReportProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private JobId jobId = null;
+
+
+ public JobReportPBImpl() {
+ builder = JobReportProto.newBuilder();
+ }
+
+ public JobReportPBImpl(JobReportProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public JobReportProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.jobId != null) {
+ builder.setJobId(convertToProtoFormat(this.jobId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = JobReportProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public JobId getJobId() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.jobId != null) {
+ return this.jobId;
+ }
+ if (!p.hasJobId()) {
+ return null;
+ }
+ this.jobId = convertFromProtoFormat(p.getJobId());
+ return this.jobId;
+ }
+
+ @Override
+ public void setJobId(JobId jobId) {
+ maybeInitBuilder();
+ if (jobId == null)
+ builder.clearJobId();
+ this.jobId = jobId;
+ }
+ @Override
+ public JobState getJobState() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasJobState()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getJobState());
+ }
+
+ @Override
+ public void setJobState(JobState jobState) {
+ maybeInitBuilder();
+ if (jobState == null) {
+ builder.clearJobState();
+ return;
+ }
+ builder.setJobState(convertToProtoFormat(jobState));
+ }
+ @Override
+ public float getMapProgress() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getMapProgress());
+ }
+
+ @Override
+ public void setMapProgress(float mapProgress) {
+ maybeInitBuilder();
+ builder.setMapProgress((mapProgress));
+ }
+ @Override
+ public float getReduceProgress() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getReduceProgress());
+ }
+
+ @Override
+ public void setReduceProgress(float reduceProgress) {
+ maybeInitBuilder();
+ builder.setReduceProgress((reduceProgress));
+ }
+ @Override
+ public float getCleanupProgress() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getCleanupProgress());
+ }
+
+ @Override
+ public void setCleanupProgress(float cleanupProgress) {
+ maybeInitBuilder();
+ builder.setCleanupProgress((cleanupProgress));
+ }
+ @Override
+ public float getSetupProgress() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getSetupProgress());
+ }
+
+ @Override
+ public void setSetupProgress(float setupProgress) {
+ maybeInitBuilder();
+ builder.setSetupProgress((setupProgress));
+ }
+ @Override
+ public long getStartTime() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getStartTime());
+ }
+
+ @Override
+ public void setStartTime(long startTime) {
+ maybeInitBuilder();
+ builder.setStartTime((startTime));
+ }
+ @Override
+ public long getFinishTime() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getFinishTime());
+ }
+
+ @Override
+ public void setFinishTime(long finishTime) {
+ maybeInitBuilder();
+ builder.setFinishTime((finishTime));
+ }
+
+ private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
+ return new JobIdPBImpl(p);
+ }
+
+ private JobIdProto convertToProtoFormat(JobId t) {
+ return ((JobIdPBImpl)t).getProto();
+ }
+
+ private JobStateProto convertToProtoFormat(JobState e) {
+ return MRProtoUtils.convertToProtoFormat(e);
+ }
+
+ private JobState convertFromProtoFormat(JobStateProto e) {
+ return MRProtoUtils.convertFromProtoFormat(e);
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptCompletionEventPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptCompletionEventPBImpl.java
new file mode 100644
index 0000000..4f388e6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptCompletionEventPBImpl.java
@@ -0,0 +1,159 @@
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventProtoOrBuilder;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventStatusProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
+import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class TaskAttemptCompletionEventPBImpl extends ProtoBase<TaskAttemptCompletionEventProto> implements TaskAttemptCompletionEvent {
+ TaskAttemptCompletionEventProto proto = TaskAttemptCompletionEventProto.getDefaultInstance();
+ TaskAttemptCompletionEventProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskAttemptId taskAttemptId = null;
+
+
+ public TaskAttemptCompletionEventPBImpl() {
+ builder = TaskAttemptCompletionEventProto.newBuilder();
+ }
+
+ public TaskAttemptCompletionEventPBImpl(TaskAttemptCompletionEventProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public TaskAttemptCompletionEventProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskAttemptId != null) {
+ builder.setAttemptId(convertToProtoFormat(this.taskAttemptId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = TaskAttemptCompletionEventProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public TaskAttemptId getAttemptId() {
+ TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskAttemptId != null) {
+ return this.taskAttemptId;
+ }
+ if (!p.hasAttemptId()) {
+ return null;
+ }
+ this.taskAttemptId = convertFromProtoFormat(p.getAttemptId());
+ return this.taskAttemptId;
+ }
+
+ @Override
+ public void setAttemptId(TaskAttemptId attemptId) {
+ maybeInitBuilder();
+ if (attemptId == null)
+ builder.clearAttemptId();
+ this.taskAttemptId = attemptId;
+ }
+ @Override
+ public TaskAttemptCompletionEventStatus getStatus() {
+ TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasStatus()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getStatus());
+ }
+
+ @Override
+ public void setStatus(TaskAttemptCompletionEventStatus status) {
+ maybeInitBuilder();
+ if (status == null) {
+ builder.clearStatus();
+ return;
+ }
+ builder.setStatus(convertToProtoFormat(status));
+ }
+ @Override
+ public String getMapOutputServerAddress() {
+ TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasMapOutputServerAddress()) {
+ return null;
+ }
+ return (p.getMapOutputServerAddress());
+ }
+
+ @Override
+ public void setMapOutputServerAddress(String mapOutputServerAddress) {
+ maybeInitBuilder();
+ if (mapOutputServerAddress == null) {
+ builder.clearMapOutputServerAddress();
+ return;
+ }
+ builder.setMapOutputServerAddress((mapOutputServerAddress));
+ }
+ @Override
+ public int getAttemptRunTime() {
+ TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getAttemptRunTime());
+ }
+
+ @Override
+ public void setAttemptRunTime(int attemptRunTime) {
+ maybeInitBuilder();
+ builder.setAttemptRunTime((attemptRunTime));
+ }
+ @Override
+ public int getEventId() {
+ TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getEventId());
+ }
+
+ @Override
+ public void setEventId(int eventId) {
+ maybeInitBuilder();
+ builder.setEventId((eventId));
+ }
+
+ private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
+ return new TaskAttemptIdPBImpl(p);
+ }
+
+ private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
+ return ((TaskAttemptIdPBImpl)t).getProto();
+ }
+
+ private TaskAttemptCompletionEventStatusProto convertToProtoFormat(TaskAttemptCompletionEventStatus e) {
+ return MRProtoUtils.convertToProtoFormat(e);
+ }
+
+ private TaskAttemptCompletionEventStatus convertFromProtoFormat(TaskAttemptCompletionEventStatusProto e) {
+ return MRProtoUtils.convertFromProtoFormat(e);
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptIdPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptIdPBImpl.java
new file mode 100644
index 0000000..b88add8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptIdPBImpl.java
@@ -0,0 +1,124 @@
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+
+import java.text.NumberFormat;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProtoOrBuilder;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class TaskAttemptIdPBImpl extends ProtoBase<TaskAttemptIdProto> implements TaskAttemptId {
+ TaskAttemptIdProto proto = TaskAttemptIdProto.getDefaultInstance();
+ TaskAttemptIdProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskId taskId = null;
+ protected static final NumberFormat idFormat = NumberFormat.getInstance();
+ static {
+ idFormat.setGroupingUsed(false);
+ idFormat.setMinimumIntegerDigits(6);
+ }
+
+ protected static final NumberFormat jobidFormat = NumberFormat.getInstance();
+ static {
+ jobidFormat.setGroupingUsed(false);
+ jobidFormat.setMinimumIntegerDigits(4);
+ }
+
+
+ public TaskAttemptIdPBImpl() {
+ builder = TaskAttemptIdProto.newBuilder();
+ }
+
+ public TaskAttemptIdPBImpl(TaskAttemptIdProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public TaskAttemptIdProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskId != null && !((TaskIdPBImpl)this.taskId).getProto().equals(builder.getTaskId())) {
+ builder.setTaskId(convertToProtoFormat(this.taskId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = TaskAttemptIdProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public int getId() {
+ TaskAttemptIdProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getId());
+ }
+
+ @Override
+ public void setId(int id) {
+ maybeInitBuilder();
+ builder.setId((id));
+ }
+ @Override
+ public TaskId getTaskId() {
+ TaskAttemptIdProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskId != null) {
+ return this.taskId;
+ }
+ if (!p.hasTaskId()) {
+ return null;
+ }
+ taskId = convertFromProtoFormat(p.getTaskId());
+ return taskId;
+ }
+
+ @Override
+ public void setTaskId(TaskId taskId) {
+ maybeInitBuilder();
+ if (taskId == null)
+ builder.clearTaskId();
+ this.taskId = taskId;
+ }
+
+ private TaskIdPBImpl convertFromProtoFormat(TaskIdProto p) {
+ return new TaskIdPBImpl(p);
+ }
+
+ private TaskIdProto convertToProtoFormat(TaskId t) {
+ return ((TaskIdPBImpl)t).getProto();
+ }
+
+ @Override
+ public String toString() {
+ String identifier = (getTaskId() == null) ? "none":
+ getTaskId().getJobId().getAppId().getClusterTimestamp() + "_" +
+ jobidFormat.format(getTaskId().getJobId().getAppId().getId()) + "_" +
+ ((getTaskId().getTaskType() == TaskType.MAP) ? "m" : "r") + "_" +
+ idFormat.format(getTaskId().getId()) + "_" +
+ getId();
+
+ return "attempt_" + identifier;
+ }
+}
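Note: the PBImpl records above follow a copy-on-write pattern — scalar setters write through to the protobuf builder, while object-valued fields are cached locally and merged when getProto() is called. A minimal usage sketch (values are illustrative, not from the patch):

    TaskIdPBImpl taskId = new TaskIdPBImpl();
    taskId.setId(4);                       // task number within the job
    TaskAttemptIdPBImpl attemptId = new TaskAttemptIdPBImpl();
    attemptId.setTaskId(taskId);           // cached locally until getProto()
    attemptId.setId(0);                    // first attempt
    // getProto() merges the cached TaskId into the builder and freezes the
    // record back into proto form; the wrapper can keep mutating afterwards.
    TaskAttemptIdProto proto = attemptId.getProto();
    TaskAttemptId restored = new TaskAttemptIdPBImpl(proto);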
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
new file mode 100644
index 0000000..a81aced
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
@@ -0,0 +1,250 @@
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.PhaseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptReportProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptReportProtoOrBuilder;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptStateProto;
+import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class TaskAttemptReportPBImpl extends ProtoBase<TaskAttemptReportProto> implements TaskAttemptReport {
+ TaskAttemptReportProto proto = TaskAttemptReportProto.getDefaultInstance();
+ TaskAttemptReportProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskAttemptId taskAttemptId = null;
+ private Counters counters = null;
+
+
+ public TaskAttemptReportPBImpl() {
+ builder = TaskAttemptReportProto.newBuilder();
+ }
+
+ public TaskAttemptReportPBImpl(TaskAttemptReportProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public TaskAttemptReportProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskAttemptId != null) {
+ builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
+ }
+ if (this.counters != null) {
+ builder.setCounters(convertToProtoFormat(this.counters));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = TaskAttemptReportProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public Counters getCounters() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.counters != null) {
+ return this.counters;
+ }
+ if (!p.hasCounters()) {
+ return null;
+ }
+ this.counters = convertFromProtoFormat(p.getCounters());
+ return this.counters;
+ }
+
+ @Override
+ public void setCounters(Counters counters) {
+ maybeInitBuilder();
+ if (counters == null)
+ builder.clearCounters();
+ this.counters = counters;
+ }
+ @Override
+ public long getStartTime() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getStartTime());
+ }
+
+ @Override
+ public void setStartTime(long startTime) {
+ maybeInitBuilder();
+ builder.setStartTime((startTime));
+ }
+ @Override
+ public long getFinishTime() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getFinishTime());
+ }
+
+ @Override
+ public void setFinishTime(long finishTime) {
+ maybeInitBuilder();
+ builder.setFinishTime((finishTime));
+ }
+ @Override
+ public TaskAttemptId getTaskAttemptId() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskAttemptId != null) {
+ return this.taskAttemptId;
+ }
+ if (!p.hasTaskAttemptId()) {
+ return null;
+ }
+ this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
+ return this.taskAttemptId;
+ }
+
+ @Override
+ public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
+ maybeInitBuilder();
+ if (taskAttemptId == null)
+ builder.clearTaskAttemptId();
+ this.taskAttemptId = taskAttemptId;
+ }
+ @Override
+ public TaskAttemptState getTaskAttemptState() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasTaskAttemptState()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getTaskAttemptState());
+ }
+
+ @Override
+ public void setTaskAttemptState(TaskAttemptState taskAttemptState) {
+ maybeInitBuilder();
+ if (taskAttemptState == null) {
+ builder.clearTaskAttemptState();
+ return;
+ }
+ builder.setTaskAttemptState(convertToProtoFormat(taskAttemptState));
+ }
+ @Override
+ public float getProgress() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getProgress());
+ }
+
+ @Override
+ public void setProgress(float progress) {
+ maybeInitBuilder();
+ builder.setProgress((progress));
+ }
+ @Override
+ public String getDiagnosticInfo() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasDiagnosticInfo()) {
+ return null;
+ }
+ return (p.getDiagnosticInfo());
+ }
+
+ @Override
+ public void setDiagnosticInfo(String diagnosticInfo) {
+ maybeInitBuilder();
+ if (diagnosticInfo == null) {
+ builder.clearDiagnosticInfo();
+ return;
+ }
+ builder.setDiagnosticInfo((diagnosticInfo));
+ }
+ @Override
+ public String getStateString() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasStateString()) {
+ return null;
+ }
+ return (p.getStateString());
+ }
+
+ @Override
+ public void setStateString(String stateString) {
+ maybeInitBuilder();
+ if (stateString == null) {
+ builder.clearStateString();
+ return;
+ }
+ builder.setStateString((stateString));
+ }
+ @Override
+ public Phase getPhase() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasPhase()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getPhase());
+ }
+
+ @Override
+ public void setPhase(Phase phase) {
+ maybeInitBuilder();
+ if (phase == null) {
+ builder.clearPhase();
+ return;
+ }
+ builder.setPhase(convertToProtoFormat(phase));
+ }
+
+ private CountersPBImpl convertFromProtoFormat(CountersProto p) {
+ return new CountersPBImpl(p);
+ }
+
+ private CountersProto convertToProtoFormat(Counters t) {
+ return ((CountersPBImpl)t).getProto();
+ }
+
+ private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
+ return new TaskAttemptIdPBImpl(p);
+ }
+
+ private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
+ return ((TaskAttemptIdPBImpl)t).getProto();
+ }
+
+ private TaskAttemptStateProto convertToProtoFormat(TaskAttemptState e) {
+ return MRProtoUtils.convertToProtoFormat(e);
+ }
+
+ private TaskAttemptState convertFromProtoFormat(TaskAttemptStateProto e) {
+ return MRProtoUtils.convertFromProtoFormat(e);
+ }
+
+ private PhaseProto convertToProtoFormat(Phase e) {
+ return MRProtoUtils.convertToProtoFormat(e);
+ }
+
+ private Phase convertFromProtoFormat(PhaseProto e) {
+ return MRProtoUtils.convertFromProtoFormat(e);
+ }
+
+
+
+}
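A short sketch of populating a report (hedged: the field values are invented, and TaskAttemptState.RUNNING is assumed to be one of the enum constants in the records package):

    TaskAttemptReportPBImpl report = new TaskAttemptReportPBImpl();
    report.setStartTime(System.currentTimeMillis());
    report.setProgress(0.5f);              // scalars go straight to the builder
    report.setTaskAttemptState(TaskAttemptState.RUNNING);
    report.setDiagnosticInfo("speculative attempt started");
    // Counters and the attempt id, if set, are merged lazily in getProto().
    TaskAttemptReportProto proto = report.getProto();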
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskIdPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskIdPBImpl.java
new file mode 100644
index 0000000..0bfb1a0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskIdPBImpl.java
@@ -0,0 +1,169 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+
+import java.text.NumberFormat;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProtoOrBuilder;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskTypeProto;
+import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class TaskIdPBImpl extends ProtoBase<TaskIdProto> implements TaskId {
+ TaskIdProto proto = TaskIdProto.getDefaultInstance();
+ TaskIdProto.Builder builder = null;
+ boolean viaProto = false;
+ protected static final NumberFormat idFormat = NumberFormat.getInstance();
+ static {
+ idFormat.setGroupingUsed(false);
+ idFormat.setMinimumIntegerDigits(6);
+ }
+
+ protected static final NumberFormat jobidFormat = NumberFormat.getInstance();
+ static {
+ jobidFormat.setGroupingUsed(false);
+ jobidFormat.setMinimumIntegerDigits(4);
+ }
+
+
+ private JobId jobId = null;
+
+
+ public TaskIdPBImpl() {
+ builder = TaskIdProto.newBuilder(proto);
+ }
+
+ public TaskIdPBImpl(TaskIdProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public synchronized TaskIdProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private synchronized void mergeLocalToBuilder() {
+    if (this.jobId != null && !((JobIdPBImpl)this.jobId).getProto().equals(builder.getJobId())) {
+ builder.setJobId(convertToProtoFormat(this.jobId));
+ }
+ }
+
+ private synchronized void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private synchronized void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = TaskIdProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public synchronized int getId() {
+ TaskIdProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getId());
+ }
+
+ @Override
+ public synchronized void setId(int id) {
+ maybeInitBuilder();
+ builder.setId((id));
+ }
+ @Override
+ public synchronized JobId getJobId() {
+ TaskIdProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.jobId != null) {
+ return this.jobId;
+ }
+ if (!p.hasJobId()) {
+ return null;
+ }
+ jobId = convertFromProtoFormat(p.getJobId());
+ return jobId;
+ }
+
+ @Override
+ public synchronized void setJobId(JobId jobId) {
+ maybeInitBuilder();
+ if (jobId == null)
+ builder.clearJobId();
+ this.jobId = jobId;
+ }
+ @Override
+ public synchronized TaskType getTaskType() {
+ TaskIdProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasTaskType()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getTaskType());
+ }
+
+ @Override
+ public synchronized void setTaskType(TaskType taskType) {
+ maybeInitBuilder();
+ if (taskType == null) {
+ builder.clearTaskType();
+ return;
+ }
+ builder.setTaskType(convertToProtoFormat(taskType));
+ }
+
+ private synchronized JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
+ return new JobIdPBImpl(p);
+ }
+
+ private synchronized JobIdProto convertToProtoFormat(JobId t) {
+ return ((JobIdPBImpl)t).getProto();
+ }
+
+ private synchronized TaskTypeProto convertToProtoFormat(TaskType e) {
+ return MRProtoUtils.convertToProtoFormat(e);
+ }
+
+ private synchronized TaskType convertFromProtoFormat(TaskTypeProto e) {
+ return MRProtoUtils.convertFromProtoFormat(e);
+ }
+
+
+ @Override
+ public synchronized String toString() {
+ String jobIdentifier = (jobId == null) ? "none":
+ jobId.getAppId().getClusterTimestamp() + "_" +
+ jobidFormat.format(jobId.getAppId().getId()) + "_" +
+ ((getTaskType() == TaskType.MAP) ? "m":"r") + "_" + idFormat.format(getId());
+ return "task_" + jobIdentifier;
+ }
+}
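The toString() above reproduces the classic task-id layout. A hedged illustration (timestamp and ids invented):

    TaskIdPBImpl taskId = new TaskIdPBImpl();
    taskId.setTaskType(TaskType.MAP);
    taskId.setId(4);
    // With no JobId set this falls back to "task_none"; with a JobId it
    // renders task_<clusterTimestamp>_<job id %04d>_m_<task id %06d>,
    // e.g. task_1315506425718_0002_m_000004.
    String name = taskId.toString();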
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java
new file mode 100644
index 0000000..3053705
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java
@@ -0,0 +1,376 @@
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskReportProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskReportProtoOrBuilder;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskStateProto;
+import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+
+public class TaskReportPBImpl extends ProtoBase<TaskReportProto> implements TaskReport {
+ TaskReportProto proto = TaskReportProto.getDefaultInstance();
+ TaskReportProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private TaskId taskId = null;
+ private Counters counters = null;
+ private List<TaskAttemptId> runningAttempts = null;
+ private TaskAttemptId successfulAttemptId = null;
+ private List<String> diagnostics = null;
+
+
+ public TaskReportPBImpl() {
+ builder = TaskReportProto.newBuilder();
+ }
+
+ public TaskReportPBImpl(TaskReportProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public TaskReportProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.taskId != null) {
+ builder.setTaskId(convertToProtoFormat(this.taskId));
+ }
+ if (this.counters != null) {
+ builder.setCounters(convertToProtoFormat(this.counters));
+ }
+ if (this.runningAttempts != null) {
+ addRunningAttemptsToProto();
+ }
+ if (this.successfulAttemptId != null) {
+ builder.setSuccessfulAttempt(convertToProtoFormat(this.successfulAttemptId));
+ }
+ if (this.diagnostics != null) {
+ addDiagnosticsToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = TaskReportProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public Counters getCounters() {
+ TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.counters != null) {
+ return this.counters;
+ }
+ if (!p.hasCounters()) {
+ return null;
+ }
+ this.counters = convertFromProtoFormat(p.getCounters());
+ return this.counters;
+ }
+
+ @Override
+ public void setCounters(Counters counters) {
+ maybeInitBuilder();
+ if (counters == null)
+ builder.clearCounters();
+ this.counters = counters;
+ }
+ @Override
+ public long getStartTime() {
+ TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getStartTime());
+ }
+
+ @Override
+ public void setStartTime(long startTime) {
+ maybeInitBuilder();
+ builder.setStartTime((startTime));
+ }
+ @Override
+ public long getFinishTime() {
+ TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getFinishTime());
+ }
+
+ @Override
+ public void setFinishTime(long finishTime) {
+ maybeInitBuilder();
+ builder.setFinishTime((finishTime));
+ }
+ @Override
+ public TaskId getTaskId() {
+ TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.taskId != null) {
+ return this.taskId;
+ }
+ if (!p.hasTaskId()) {
+ return null;
+ }
+ this.taskId = convertFromProtoFormat(p.getTaskId());
+ return this.taskId;
+ }
+
+ @Override
+ public void setTaskId(TaskId taskId) {
+ maybeInitBuilder();
+ if (taskId == null)
+ builder.clearTaskId();
+ this.taskId = taskId;
+ }
+ @Override
+ public float getProgress() {
+ TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getProgress());
+ }
+
+ @Override
+ public void setProgress(float progress) {
+ maybeInitBuilder();
+ builder.setProgress((progress));
+ }
+ @Override
+ public TaskState getTaskState() {
+ TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasTaskState()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getTaskState());
+ }
+
+ @Override
+ public void setTaskState(TaskState taskState) {
+ maybeInitBuilder();
+ if (taskState == null) {
+ builder.clearTaskState();
+ return;
+ }
+ builder.setTaskState(convertToProtoFormat(taskState));
+ }
+ @Override
+ public List<TaskAttemptId> getRunningAttemptsList() {
+ initRunningAttempts();
+ return this.runningAttempts;
+ }
+ @Override
+ public TaskAttemptId getRunningAttempt(int index) {
+ initRunningAttempts();
+ return this.runningAttempts.get(index);
+ }
+ @Override
+ public int getRunningAttemptsCount() {
+ initRunningAttempts();
+ return this.runningAttempts.size();
+ }
+
+ private void initRunningAttempts() {
+ if (this.runningAttempts != null) {
+ return;
+ }
+ TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+ List<TaskAttemptIdProto> list = p.getRunningAttemptsList();
+ this.runningAttempts = new ArrayList<TaskAttemptId>();
+
+ for (TaskAttemptIdProto c : list) {
+ this.runningAttempts.add(convertFromProtoFormat(c));
+ }
+ }
+
+ @Override
+ public void addAllRunningAttempts(final List<TaskAttemptId> runningAttempts) {
+ if (runningAttempts == null)
+ return;
+ initRunningAttempts();
+ this.runningAttempts.addAll(runningAttempts);
+ }
+
+ private void addRunningAttemptsToProto() {
+ maybeInitBuilder();
+ builder.clearRunningAttempts();
+ if (runningAttempts == null)
+ return;
+ Iterable<TaskAttemptIdProto> iterable = new Iterable<TaskAttemptIdProto>() {
+ @Override
+ public Iterator<TaskAttemptIdProto> iterator() {
+ return new Iterator<TaskAttemptIdProto>() {
+
+ Iterator<TaskAttemptId> iter = runningAttempts.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public TaskAttemptIdProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllRunningAttempts(iterable);
+ }
+ @Override
+ public void addRunningAttempt(TaskAttemptId runningAttempts) {
+ initRunningAttempts();
+ this.runningAttempts.add(runningAttempts);
+ }
+ @Override
+ public void removeRunningAttempt(int index) {
+ initRunningAttempts();
+ this.runningAttempts.remove(index);
+ }
+ @Override
+ public void clearRunningAttempts() {
+ initRunningAttempts();
+ this.runningAttempts.clear();
+ }
+ @Override
+ public TaskAttemptId getSuccessfulAttempt() {
+ TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.successfulAttemptId != null) {
+ return this.successfulAttemptId;
+ }
+ if (!p.hasSuccessfulAttempt()) {
+ return null;
+ }
+ this.successfulAttemptId = convertFromProtoFormat(p.getSuccessfulAttempt());
+ return this.successfulAttemptId;
+ }
+
+ @Override
+ public void setSuccessfulAttempt(TaskAttemptId successfulAttempt) {
+ maybeInitBuilder();
+ if (successfulAttempt == null)
+ builder.clearSuccessfulAttempt();
+ this.successfulAttemptId = successfulAttempt;
+ }
+ @Override
+ public List<String> getDiagnosticsList() {
+ initDiagnostics();
+ return this.diagnostics;
+ }
+ @Override
+ public String getDiagnostics(int index) {
+ initDiagnostics();
+ return this.diagnostics.get(index);
+ }
+ @Override
+ public int getDiagnosticsCount() {
+ initDiagnostics();
+ return this.diagnostics.size();
+ }
+
+ private void initDiagnostics() {
+ if (this.diagnostics != null) {
+ return;
+ }
+ TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+ List<String> list = p.getDiagnosticsList();
+ this.diagnostics = new ArrayList<String>();
+
+ for (String c : list) {
+ this.diagnostics.add(c);
+ }
+ }
+
+ @Override
+ public void addAllDiagnostics(final List<String> diagnostics) {
+ if (diagnostics == null)
+ return;
+ initDiagnostics();
+ this.diagnostics.addAll(diagnostics);
+ }
+
+ private void addDiagnosticsToProto() {
+ maybeInitBuilder();
+ builder.clearDiagnostics();
+ if (diagnostics == null)
+ return;
+ builder.addAllDiagnostics(diagnostics);
+ }
+ @Override
+ public void addDiagnostics(String diagnostics) {
+ initDiagnostics();
+ this.diagnostics.add(diagnostics);
+ }
+ @Override
+ public void removeDiagnostics(int index) {
+ initDiagnostics();
+ this.diagnostics.remove(index);
+ }
+ @Override
+ public void clearDiagnostics() {
+ initDiagnostics();
+ this.diagnostics.clear();
+ }
+
+ private CountersPBImpl convertFromProtoFormat(CountersProto p) {
+ return new CountersPBImpl(p);
+ }
+
+ private CountersProto convertToProtoFormat(Counters t) {
+ return ((CountersPBImpl)t).getProto();
+ }
+
+ private TaskIdPBImpl convertFromProtoFormat(TaskIdProto p) {
+ return new TaskIdPBImpl(p);
+ }
+
+ private TaskIdProto convertToProtoFormat(TaskId t) {
+ return ((TaskIdPBImpl)t).getProto();
+ }
+
+ private TaskStateProto convertToProtoFormat(TaskState e) {
+ return MRProtoUtils.convertToProtoFormat(e);
+ }
+
+ private TaskState convertFromProtoFormat(TaskStateProto e) {
+ return MRProtoUtils.convertFromProtoFormat(e);
+ }
+
+ private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
+ return new TaskAttemptIdPBImpl(p);
+ }
+
+ private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
+ return ((TaskAttemptIdPBImpl)t).getProto();
+ }
+
+
+
+}
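The list-valued fields show the same deferred conversion: mutations go to a local java.util.List and are only translated to protos inside getProto(). A fragment (attempt-id construction elided for brevity):

    TaskReportPBImpl report = new TaskReportPBImpl();
    report.addRunningAttempt(new TaskAttemptIdPBImpl());
    report.addDiagnostics("fetch failure reported by reducer");
    // addRunningAttemptsToProto()/addDiagnosticsToProto() run inside
    // mergeLocalToBuilder(), so repeated adds stay cheap until this call.
    TaskReportProto proto = report.getProto();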
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
new file mode 100644
index 0000000..5eaf0e3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
@@ -0,0 +1,249 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.jobhistory;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.net.URLEncoder;
+
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+public class FileNameIndexUtils {
+
+ static final String UNDERSCORE_ESCAPE = "%5F";
+ static final int JOB_NAME_TRIM_LENGTH = 50;
+
+  // This has to be an underscore for now, until the escaping scheme is keyed off DELIMITER.
+ static final String DELIMITER = "_";
+
+ private static final int JOB_ID_INDEX = 0;
+ private static final int SUBMIT_TIME_INDEX = 1;
+ private static final int USER_INDEX = 2;
+ private static final int JOB_NAME_INDEX = 3;
+ private static final int FINISH_TIME_INDEX = 4;
+ private static final int NUM_MAPS_INDEX = 5;
+ private static final int NUM_REDUCES_INDEX = 6;
+ private static final int JOB_STATUS_INDEX = 7;
+ private static final int MAX_INDEX = JOB_STATUS_INDEX;
+
+ /**
+ * Constructs the job history file name from the JobIndexInfo.
+ *
+ * @param indexInfo the index info.
+   * @return the done job history filename.
+   * @throws IOException if the filename cannot be URL-encoded.
+   */
+ public static String getDoneFileName(JobIndexInfo indexInfo) throws IOException {
+ StringBuilder sb = new StringBuilder();
+ //JobId
+ sb.append(escapeUnderscores(TypeConverter.fromYarn(indexInfo.getJobId()).toString()));
+ sb.append(DELIMITER);
+
+    //SubmitTime
+ sb.append(indexInfo.getSubmitTime());
+ sb.append(DELIMITER);
+
+ //UserName
+ sb.append(escapeUnderscores(getUserName(indexInfo)));
+ sb.append(DELIMITER);
+
+ //JobName
+ sb.append(escapeUnderscores(trimJobName(getJobName(indexInfo))));
+ sb.append(DELIMITER);
+
+ //FinishTime
+ sb.append(indexInfo.getFinishTime());
+ sb.append(DELIMITER);
+
+ //NumMaps
+ sb.append(indexInfo.getNumMaps());
+ sb.append(DELIMITER);
+
+ //NumReduces
+ sb.append(indexInfo.getNumReduces());
+ sb.append(DELIMITER);
+
+ //JobStatus
+ sb.append(indexInfo.getJobStatus());
+
+ sb.append(JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION);
+ return encodeJobHistoryFileName(sb.toString());
+ }
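+
+  // Illustrative result (field values hypothetical; underscores inside the
+  // escaped fields become "%5F" so that DELIMITER splits unambiguously):
+  //   job%5F1315506425718%5F0001_1316242000000_alice_wordcount_1316242100000_10_2_SUCCEEDED.jhist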
+
+ /**
+   * Parses the provided job history file name into a
+   * JobIndexInfo object.
+ *
+ * @param jhFileName the job history filename.
+ * @return a JobIndexInfo object built from the filename.
+ */
+ public static JobIndexInfo getIndexInfo(String jhFileName) throws IOException {
+ String fileName = jhFileName.substring(0, jhFileName.indexOf(JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION));
+ JobIndexInfo indexInfo = new JobIndexInfo();
+
+ String[] jobDetails = fileName.split(DELIMITER);
+    if (jobDetails.length != MAX_INDEX + 1) {
+      throw new IOException("Failed to parse file: [" + jhFileName + "]. Expected " + (MAX_INDEX + 1) + " parts.");
+ }
+
+ JobID oldJobId = JobID.forName(decodeJobHistoryFileName(jobDetails[JOB_ID_INDEX]));
+ JobId jobId = TypeConverter.toYarn(oldJobId);
+ indexInfo.setJobId(jobId);
+ //TODO Catch NumberFormatException - Do not fail if there's only a few fields missing.
+ indexInfo.setSubmitTime(Long.parseLong(decodeJobHistoryFileName(jobDetails[SUBMIT_TIME_INDEX])));
+
+ indexInfo.setUser(decodeJobHistoryFileName(jobDetails[USER_INDEX]));
+
+ indexInfo.setJobName(decodeJobHistoryFileName(jobDetails[JOB_NAME_INDEX]));
+
+ indexInfo.setFinishTime(Long.parseLong(decodeJobHistoryFileName(jobDetails[FINISH_TIME_INDEX])));
+
+ indexInfo.setNumMaps(Integer.parseInt(decodeJobHistoryFileName(jobDetails[NUM_MAPS_INDEX])));
+
+ indexInfo.setNumReduces(Integer.parseInt(decodeJobHistoryFileName(jobDetails[NUM_REDUCES_INDEX])));
+
+ indexInfo.setJobStatus(decodeJobHistoryFileName(jobDetails[JOB_STATUS_INDEX]));
+
+ return indexInfo;
+ }
+
+
+ /**
+   * Helper function to URL-encode the file name of a job-history
+   * log file.
+   *
+   * @param logFileName file name of the job-history file
+   * @return URL-encoded filename
+   * @throws IOException if the encoding fails.
+ */
+ public static String encodeJobHistoryFileName(String logFileName)
+ throws IOException {
+ String replacementUnderscoreEscape = null;
+
+ if (logFileName.contains(UNDERSCORE_ESCAPE)) {
+ replacementUnderscoreEscape = nonOccursString(logFileName);
+
+ logFileName = replaceStringInstances
+ (logFileName, UNDERSCORE_ESCAPE, replacementUnderscoreEscape);
+ }
+
+ String encodedFileName = null;
+ try {
+ encodedFileName = URLEncoder.encode(logFileName, "UTF-8");
+ } catch (UnsupportedEncodingException uee) {
+ IOException ioe = new IOException();
+ ioe.initCause(uee);
+ ioe.setStackTrace(uee.getStackTrace());
+ throw ioe;
+ }
+
+ if (replacementUnderscoreEscape != null) {
+ encodedFileName = replaceStringInstances
+ (encodedFileName, replacementUnderscoreEscape, UNDERSCORE_ESCAPE);
+ }
+
+ return encodedFileName;
+ }
+
+ /**
+   * Helper function to URL-decode the file name of a job-history
+   * log file.
+   *
+   * @param logFileName file name of the job-history file
+   * @return URL-decoded filename
+   * @throws IOException if the decoding fails.
+ */
+ public static String decodeJobHistoryFileName(String logFileName)
+ throws IOException {
+ String decodedFileName = null;
+ try {
+ decodedFileName = URLDecoder.decode(logFileName, "UTF-8");
+ } catch (UnsupportedEncodingException uee) {
+ IOException ioe = new IOException();
+ ioe.initCause(uee);
+ ioe.setStackTrace(uee.getStackTrace());
+ throw ioe;
+ }
+ return decodedFileName;
+ }
+
+ static String nonOccursString(String logFileName) {
+ int adHocIndex = 0;
+
+ String unfoundString = "q" + adHocIndex;
+
+ while (logFileName.contains(unfoundString)) {
+ unfoundString = "q" + ++adHocIndex;
+ }
+
+ return unfoundString + "q";
+ }
+
+ private static String getUserName(JobIndexInfo indexInfo) {
+ return getNonEmptyString(indexInfo.getUser());
+ }
+
+ private static String getJobName(JobIndexInfo indexInfo) {
+ return getNonEmptyString(indexInfo.getJobName());
+ }
+
+ //TODO Maybe handle default values for longs and integers here?
+
+ private static String getNonEmptyString(String in) {
+ if (in == null || in.length() == 0) {
+ in = "NA";
+ }
+ return in;
+ }
+
+ private static String escapeUnderscores(String escapee) {
+ return replaceStringInstances(escapee, "_", UNDERSCORE_ESCAPE);
+ }
+
+ // I tolerate this code because I expect a low number of
+ // occurrences in a relatively short string
+ private static String replaceStringInstances
+ (String logFileName, String old, String replacement) {
+ int index = logFileName.indexOf(old);
+
+    // >= 0 so that a match at position 0 is replaced as well.
+    while (index >= 0) {
+ logFileName = (logFileName.substring(0, index)
+ + replacement
+ + replaceStringInstances
+ (logFileName.substring(index + old.length()),
+ old, replacement));
+
+ index = logFileName.indexOf(old);
+ }
+
+ return logFileName;
+ }
+
+ /**
+ * Trims the job-name if required
+ */
+ private static String trimJobName(String jobName) {
+ if (jobName.length() > JOB_NAME_TRIM_LENGTH) {
+ jobName = jobName.substring(0, JOB_NAME_TRIM_LENGTH);
+ }
+ return jobName;
+ }
+}
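A hedged round-trip fragment for the encode/decode helpers above (the input name is invented):

    // "%5F" already present in the raw name is shielded via nonOccursString()
    // before URL-encoding, so decoding is lossless.
    String encoded = FileNameIndexUtils.encodeJobHistoryFileName("report 2011.jhist");
    String decoded = FileNameIndexUtils.decodeJobHistoryFileName(encoded);
    assert "report 2011.jhist".equals(decoded);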
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHConfig.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHConfig.java
new file mode 100644
index 0000000..8a74d14
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHConfig.java
@@ -0,0 +1,99 @@
+package org.apache.hadoop.mapreduce.v2.jobhistory;
+
+public class JHConfig {
+ public static final String HS_PREFIX = "yarn.server.historyserver.";
+  /** host:port address to which the history server binds **/
+ public static final String HS_BIND_ADDRESS = HS_PREFIX + "address";
+
+ public static final String HS_USER_NAME = HS_PREFIX + "kerberos.principal";
+
+  public static final String HS_KEYTAB_FILE = HS_PREFIX + "keytab.file";
+
+ public static final String DEFAULT_HS_BIND_ADDRESS = "0.0.0.0:10020";
+
+  /** Intermediate done dir, written by the AppMaster **/
+ public static final String HISTORY_INTERMEDIATE_DONE_DIR_KEY =
+ "yarn.historyfile.intermediateDoneDir";
+
+  /** Final done dir for history files **/
+ public static final String HISTORY_DONE_DIR_KEY =
+ "yarn.historyfile.doneDir";
+
+ /**
+ * Boolean. Create the base dirs in the JobHistoryEventHandler
+ * Set to false for multi-user clusters.
+ */
+ public static final String CREATE_HISTORY_INTERMEDIATE_BASE_DIR_KEY =
+ "yarn.history.create.intermediate.base.dir";
+
+ /** Done Dir for history server. **/
+ public static final String HISTORY_SERVER_DONE_DIR_KEY =
+ HS_PREFIX + "historyfile.doneDir";
+
+ /**
+ * Size of the job list cache.
+ */
+ public static final String HISTORY_SERVER_JOBLIST_CACHE_SIZE_KEY =
+ HS_PREFIX + "joblist.cache.size";
+
+ /**
+ * Size of the loaded job cache.
+ */
+ public static final String HISTORY_SERVER_LOADED_JOB_CACHE_SIZE_KEY =
+ HS_PREFIX + "loadedjobs.cache.size";
+
+ /**
+   * Size of the date string cache. Affects the number of directories
+   * that will be scanned to find a job.
+ */
+ public static final String HISTORY_SERVER_DATESTRING_CACHE_SIZE_KEY =
+ HS_PREFIX + "datestring.cache.size";
+
+ /**
+ * The time interval in milliseconds for the history server
+ * to wake up and scan for files to be moved.
+ */
+ public static final String HISTORY_SERVER_MOVE_THREAD_INTERVAL =
+ HS_PREFIX + "move.thread.interval";
+
+ /**
+ * The number of threads used to move files.
+ */
+ public static final String HISTORY_SERVER_NUM_MOVE_THREADS =
+ HS_PREFIX + "move.threads.count";
+
+ // Equivalent to 0.20 mapreduce.jobhistory.debug.mode
+ public static final String HISTORY_DEBUG_MODE_KEY = HS_PREFIX + "debug.mode";
+
+ public static final String HISTORY_MAXAGE =
+ "yarn.historyfile.maxage";
+
+  //TODO Move some of the HistoryServer-specific keys out into a separate configuration class.
+ public static final String HS_KEYTAB_KEY = HS_PREFIX + "keytab";
+
+ public static final String HS_SERVER_PRINCIPAL_KEY = "yarn.historyserver.principal";
+
+ public static final String RUN_HISTORY_CLEANER_KEY =
+ HS_PREFIX + "cleaner.run";
+
+ /**
+ * Run interval for the History Cleaner thread.
+ */
+ public static final String HISTORY_CLEANER_RUN_INTERVAL =
+ HS_PREFIX + "cleaner.run.interval";
+
+ public static final String HS_WEBAPP_BIND_ADDRESS = HS_PREFIX +
+ "address.webapp";
+ public static final String DEFAULT_HS_WEBAPP_BIND_ADDRESS =
+ "0.0.0.0:19888";
+
+ public static final String HS_CLIENT_THREADS =
+ HS_PREFIX + "client.threads";
+ public static final int DEFAULT_HS_CLIENT_THREADS = 10;
+
+  // From JTConfig. May need to be moved elsewhere.
+ public static final String JOBHISTORY_TASKPROGRESS_NUMBER_SPLITS_KEY =
+ "mapreduce.jobtracker.jobhistory.task.numberprogresssplits";
+
+  public static final int DEFAULT_NUMBER_PROGRESS_SPLITS = 12;
+}
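These keys are meant to be read through the standard Configuration API; a small fragment using the defaults defined above (assumes org.apache.hadoop.conf.Configuration is imported):

    Configuration conf = new Configuration();
    String rpcAddress = conf.get(JHConfig.HS_BIND_ADDRESS,
                                 JHConfig.DEFAULT_HS_BIND_ADDRESS);
    int clientThreads = conf.getInt(JHConfig.HS_CLIENT_THREADS,
                                    JHConfig.DEFAULT_HS_CLIENT_THREADS);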
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
new file mode 100644
index 0000000..ae87f58
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
@@ -0,0 +1,499 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.jobhistory;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.Calendar;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public class JobHistoryUtils {
+
+ /**
+   * Permissions for the history staging dir while the job is in progress.
+   */
+  public static final FsPermission HISTORY_STAGING_DIR_PERMISSIONS =
+      FsPermission.createImmutable((short) 0700);
+
+ /**
+ * Permissions for the user directory under the staging directory.
+ */
+ public static final FsPermission HISTORY_STAGING_USER_DIR_PERMISSIONS =
+ FsPermission.createImmutable((short) 0700);
+
+
+
+ /**
+ * Permissions for the history done dir and derivatives.
+ */
+ public static final FsPermission HISTORY_DONE_DIR_PERMISSION =
+ FsPermission.createImmutable((short) 0770);
+
+ public static final FsPermission HISTORY_DONE_FILE_PERMISSION =
+      FsPermission.createImmutable((short) 0770); // rwxrwx---
+
+ /**
+ * Permissions for the intermediate done directory.
+ */
+ public static final FsPermission HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS =
+ FsPermission.createImmutable((short) 01777);
+
+ /**
+ * Permissions for the user directory under the intermediate done directory.
+ */
+ public static final FsPermission HISTORY_INTERMEDIATE_USER_DIR_PERMISSIONS =
+ FsPermission.createImmutable((short) 0770);
+
+ public static final FsPermission HISTORY_INTERMEDIATE_FILE_PERMISSIONS =
+      FsPermission.createImmutable((short) 0770); // rwxrwx---
+
+ /**
+ * Suffix for configuration files.
+ */
+ public static final String CONF_FILE_NAME_SUFFIX = "_conf.xml";
+
+ /**
+ * Suffix for summary files.
+ */
+ public static final String SUMMARY_FILE_NAME_SUFFIX = ".summary";
+
+ /**
+ * Job History File extension.
+ */
+ public static final String JOB_HISTORY_FILE_EXTENSION = ".jhist";
+
+ public static final int VERSION = 4;
+
+ public static final int SERIAL_NUMBER_DIRECTORY_DIGITS = 6;
+
+ public static final String TIMESTAMP_DIR_REGEX = "\\d{4}" + "\\" + File.separator + "\\d{2}" + "\\" + File.separator + "\\d{2}";
+ public static final Pattern TIMESTAMP_DIR_PATTERN = Pattern.compile(TIMESTAMP_DIR_REGEX);
+ private static final String TIMESTAMP_DIR_FORMAT = "%04d" + File.separator + "%02d" + File.separator + "%02d";
+
+ private static final PathFilter CONF_FILTER = new PathFilter() {
+ @Override
+ public boolean accept(Path path) {
+ return path.getName().endsWith(CONF_FILE_NAME_SUFFIX);
+ }
+ };
+
+ private static final PathFilter JOB_HISTORY_FILE_FILTER = new PathFilter() {
+ @Override
+ public boolean accept(Path path) {
+ return path.getName().endsWith(JOB_HISTORY_FILE_EXTENSION);
+ }
+ };
+
+ /**
+ * Checks whether the provided path string is a valid job history file.
+ * @param pathString the path to be checked.
+   * @return true if the file name ends with the job history extension.
+ */
+ public static boolean isValidJobHistoryFileName(String pathString) {
+ return pathString.endsWith(JOB_HISTORY_FILE_EXTENSION);
+ }
+
+ /**
+ * Returns the jobId from a job history file name.
+ * @param pathString the path string.
+ * @return the JobId
+ * @throws IOException if the filename format is invalid.
+ */
+ public static JobID getJobIDFromHistoryFilePath(String pathString) throws IOException {
+ String [] parts = pathString.split(Path.SEPARATOR);
+ String fileNamePart = parts[parts.length -1];
+ JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fileNamePart);
+ return TypeConverter.fromYarn(jobIndexInfo.getJobId());
+ }
+
+ /**
+ * Gets a PathFilter which would match configuration files.
+   * @return a PathFilter matching configuration files.
+ */
+ public static PathFilter getConfFileFilter() {
+ return CONF_FILTER;
+ }
+
+ /**
+ * Gets a PathFilter which would match job history file names.
+   * @return a PathFilter matching job history files.
+ */
+ public static PathFilter getHistoryFileFilter() {
+ return JOB_HISTORY_FILE_FILTER;
+ }
+
+ /**
+   * Gets the configured directory prefix for in-progress history files.
+   * @param conf the configuration.
+ * @return A string representation of the prefix.
+ */
+ public static String
+ getConfiguredHistoryStagingDirPrefix(Configuration conf)
+ throws IOException {
+ String user = UserGroupInformation.getCurrentUser().getShortUserName();
+ Path path = MRApps.getStagingAreaDir(conf, user);
+ String logDir = path.toString();
+ return logDir;
+ }
+
+ /**
+ * Gets the configured directory prefix for intermediate done history files.
+   * @param conf the configuration.
+ * @return A string representation of the prefix.
+ */
+ public static String getConfiguredHistoryIntermediateDoneDirPrefix(
+ Configuration conf) {
+ String doneDirPrefix = conf
+ .get(JHConfig.HISTORY_INTERMEDIATE_DONE_DIR_KEY);
+ if (doneDirPrefix == null) {
+ doneDirPrefix = conf.get(MRConstants.APPS_STAGING_DIR_KEY)
+ + "/history/done_intermediate";
+ }
+ return doneDirPrefix;
+ }
+
+ /**
+ * Gets the configured directory prefix for Done history files.
+   * @param conf the configuration.
+   * @return A string representation of the prefix.
+ */
+ public static String getConfiguredHistoryServerDoneDirPrefix(
+ Configuration conf) {
+ String doneDirPrefix = conf.get(JHConfig.HISTORY_DONE_DIR_KEY);
+ if (doneDirPrefix == null) {
+ doneDirPrefix = conf.get(MRConstants.APPS_STAGING_DIR_KEY)
+ + "/history/done";
+ }
+ return doneDirPrefix;
+ }
+
+ /**
+ * Gets the user directory for intermediate done history files.
+   * @param conf the configuration.
+   * @return the intermediate done directory for the current user.
+ */
+ public static String getHistoryIntermediateDoneDirForUser(Configuration conf) throws IOException {
+ return getConfiguredHistoryIntermediateDoneDirPrefix(conf) + File.separator
+ + UserGroupInformation.getCurrentUser().getShortUserName();
+ }
+
+ public static boolean shouldCreateNonUserDirectory(Configuration conf) {
+ // Returning true by default to allow non secure single node clusters to work
+ // without any configuration change.
+ return conf.getBoolean(JHConfig.CREATE_HISTORY_INTERMEDIATE_BASE_DIR_KEY, true);
+ }
+
+ /**
+ * Get the job history file path for non Done history files.
+ */
+ public static Path getStagingJobHistoryFile(Path dir, JobId jobId, int attempt) {
+ return getStagingJobHistoryFile(dir, TypeConverter.fromYarn(jobId).toString(), attempt);
+ }
+
+ /**
+ * Get the job history file path for non Done history files.
+ */
+ public static Path getStagingJobHistoryFile(Path dir, String jobId, int attempt) {
+ return new Path(dir, jobId + "_" +
+ attempt + JOB_HISTORY_FILE_EXTENSION);
+ }
+
+ /**
+ * Get the done configuration file name for a job.
+ * @param jobId the jobId.
+ * @return the conf file name.
+ */
+ public static String getIntermediateConfFileName(JobId jobId) {
+ return TypeConverter.fromYarn(jobId).toString() + CONF_FILE_NAME_SUFFIX;
+ }
+
+ /**
+ * Get the done summary file name for a job.
+ * @param jobId the jobId.
+ * @return the conf file name.
+ */
+ public static String getIntermediateSummaryFileName(JobId jobId) {
+ return TypeConverter.fromYarn(jobId).toString() + SUMMARY_FILE_NAME_SUFFIX;
+ }
+
+ /**
+ * Gets the conf file path for jobs in progress.
+ *
+ * @param logDir the log directory prefix.
+ * @param jobId the jobId.
+ * @param attempt attempt number for this job.
+   * @return the conf file path, or null if logDir is null.
+ */
+ public static Path getStagingConfFile(Path logDir, JobId jobId, int attempt) {
+ Path jobFilePath = null;
+ if (logDir != null) {
+ jobFilePath = new Path(logDir, TypeConverter.fromYarn(jobId).toString()
+ + "_" + attempt + CONF_FILE_NAME_SUFFIX);
+ }
+ return jobFilePath;
+ }
+
+ /**
+ * Gets the serial number part of the path based on the jobId and serialNumber format.
+   * @param id the jobId.
+   * @param serialNumberFormat the format string for the serial number.
+   * @return the serial number directory component.
+ */
+ public static String serialNumberDirectoryComponent(JobId id, String serialNumberFormat) {
+ return String.format(serialNumberFormat,
+ Integer.valueOf(jobSerialNumber(id))).substring(0,
+ SERIAL_NUMBER_DIRECTORY_DIGITS);
+ }
+
+  /** Extracts the timestamp component from the path.
+   * @param path the path to examine.
+   * @return the timestamp component, or null if none is found.
+ */
+ public static String getTimestampPartFromPath(String path) {
+ Matcher matcher = TIMESTAMP_DIR_PATTERN.matcher(path);
+ if (matcher.find()) {
+ String matched = matcher.group();
+ String ret = matched.intern();
+ return ret;
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Gets the history subdirectory based on the jobId, timestamp and serial number format.
+   * @param id the jobId.
+   * @param timestampComponent the yyyy/MM/dd component.
+   * @param serialNumberFormat the format string for the serial number.
+   * @return the history log sub-directory path.
+ */
+ public static String historyLogSubdirectory(JobId id, String timestampComponent, String serialNumberFormat) {
+// String result = LOG_VERSION_STRING;
+ String result = "";
+ String serialNumberDirectory = serialNumberDirectoryComponent(id, serialNumberFormat);
+
+ result = result
+ + timestampComponent
+ + File.separator + serialNumberDirectory
+ + File.separator;
+
+ return result;
+ }
+
+ /**
+ * Gets the timestamp component based on millisecond time.
+   * @param millisecondTime the time in milliseconds.
+   * @param debugMode if true, substitute hour/minute for month/day.
+   * @return the timestamp directory component.
+ */
+ public static String timestampDirectoryComponent(long millisecondTime, boolean debugMode) {
+ Calendar timestamp = Calendar.getInstance();
+ timestamp.setTimeInMillis(millisecondTime);
+ String dateString = null;
+ dateString = String.format(
+ TIMESTAMP_DIR_FORMAT,
+ timestamp.get(Calendar.YEAR),
+ // months are 0-based in Calendar, but people will expect January
+ // to be month #1.
+ timestamp.get(debugMode ? Calendar.HOUR : Calendar.MONTH) + 1,
+ timestamp.get(debugMode ? Calendar.MINUTE : Calendar.DAY_OF_MONTH));
+ dateString = dateString.intern();
+ return dateString;
+ }
+
+ public static String doneSubdirsBeforeSerialTail() {
+ // date
+ String result = "/*/*/*"; // YYYY/MM/DD ;
+ return result;
+ }
+
+ /**
+ * Computes a serial number used as part of directory naming for the given jobId.
+ * @param id the jobId.
+   * @return the serial number.
+ */
+ public static int jobSerialNumber(JobId id) {
+ return id.getId();
+ }
+
+ public static List<FileStatus> localGlobber(FileContext fc, Path root, String tail)
+ throws IOException {
+ return localGlobber(fc, root, tail, null);
+ }
+
+ public static List<FileStatus> localGlobber(FileContext fc, Path root, String tail,
+ PathFilter filter) throws IOException {
+ return localGlobber(fc, root, tail, filter, null);
+ }
+
+  // hasFlatFiles is just used to return a second value if you want
+ // one. I would have used MutableBoxedBoolean if such had been provided.
+ public static List<FileStatus> localGlobber(FileContext fc, Path root, String tail,
+ PathFilter filter, AtomicBoolean hasFlatFiles) throws IOException {
+ if (tail.equals("")) {
+ return (listFilteredStatus(fc, root, filter));
+ }
+
+ if (tail.startsWith("/*")) {
+ Path[] subdirs = filteredStat2Paths(
+ remoteIterToList(fc.listStatus(root)), true, hasFlatFiles);
+
+ List<List<FileStatus>> subsubdirs = new LinkedList<List<FileStatus>>();
+
+ int subsubdirCount = 0;
+
+ if (subdirs.length == 0) {
+ return new LinkedList<FileStatus>();
+ }
+
+ String newTail = tail.substring(2);
+
+ for (int i = 0; i < subdirs.length; ++i) {
+ subsubdirs.add(localGlobber(fc, subdirs[i], newTail, filter, null));
+ // subsubdirs.set(i, localGlobber(fc, subdirs[i], newTail, filter,
+ // null));
+ subsubdirCount += subsubdirs.get(i).size();
+ }
+
+ List<FileStatus> result = new LinkedList<FileStatus>();
+
+ for (int i = 0; i < subsubdirs.size(); ++i) {
+ result.addAll(subsubdirs.get(i));
+ }
+
+ return result;
+ }
+
+ if (tail.startsWith("/")) {
+ int split = tail.indexOf('/', 1);
+
+ if (split < 0) {
+ return listFilteredStatus(fc, new Path(root, tail.substring(1)), filter);
+ } else {
+ String thisSegment = tail.substring(1, split);
+ String newTail = tail.substring(split);
+ return localGlobber(fc, new Path(root, thisSegment), newTail, filter,
+ hasFlatFiles);
+ }
+ }
+
+    throw new IOException("localGlobber: bad tail");
+ }
+
+ private static List<FileStatus> listFilteredStatus(FileContext fc, Path root,
+ PathFilter filter) throws IOException {
+ List<FileStatus> fsList = remoteIterToList(fc.listStatus(root));
+ if (filter == null) {
+ return fsList;
+ } else {
+ List<FileStatus> filteredList = new LinkedList<FileStatus>();
+ for (FileStatus fs : fsList) {
+ if (filter.accept(fs.getPath())) {
+ filteredList.add(fs);
+ }
+ }
+ return filteredList;
+ }
+ }
+
+ private static List<FileStatus> remoteIterToList(
+ RemoteIterator<FileStatus> rIter) throws IOException {
+ List<FileStatus> fsList = new LinkedList<FileStatus>();
+ if (rIter == null)
+ return fsList;
+ while (rIter.hasNext()) {
+ fsList.add(rIter.next());
+ }
+ return fsList;
+ }
+
+ // hasMismatches is just used to return a second value if you want
+ // one. I would have used MutableBoxedBoolean if such had been provided.
+ private static Path[] filteredStat2Paths(List<FileStatus> stats, boolean dirs,
+ AtomicBoolean hasMismatches) {
+ int resultCount = 0;
+
+ if (hasMismatches == null) {
+ hasMismatches = new AtomicBoolean(false);
+ }
+
+ for (int i = 0; i < stats.size(); ++i) {
+ if (stats.get(i).isDirectory() == dirs) {
+ stats.set(resultCount++, stats.get(i));
+ } else {
+ hasMismatches.set(true);
+ }
+ }
+
+ Path[] result = new Path[resultCount];
+ for (int i = 0; i < resultCount; i++) {
+ result[i] = stats.get(i).getPath();
+ }
+
+ return result;
+ }
+
+ public static String getHistoryUrl(Configuration conf, ApplicationId appId)
+ throws UnknownHostException {
+ //construct the history url for job
+ String hsAddress = conf.get(JHConfig.HS_WEBAPP_BIND_ADDRESS,
+ JHConfig.DEFAULT_HS_WEBAPP_BIND_ADDRESS);
+ InetSocketAddress address = NetUtils.createSocketAddr(hsAddress);
+ StringBuffer sb = new StringBuffer();
+ if (address.getAddress().isAnyLocalAddress() ||
+ address.getAddress().isLoopbackAddress()) {
+ sb.append(InetAddress.getLocalHost().getHostAddress());
+ } else {
+ sb.append(address.getHostName());
+ }
+ sb.append(":").append(address.getPort());
+ sb.append("/yarn/job/"); // TODO This will change when the history server
+ // understands apps.
+    // TODO Use JobId toString once UI stops using _id_id
+ sb.append("job_").append(appId.getClusterTimestamp());
+ sb.append("_").append(appId.getId()).append("_").append(appId.getId());
+ return sb.toString();
+ }
+}
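A fragment tying the layout helpers together (jobId is assumed to exist in scope; "%09d" is an assumed serial-number format, since the real format string is supplied by the caller):

    long finishMillis = System.currentTimeMillis();
    // yyyy/MM/dd component; debugMode=false uses calendar month and day.
    String datePart = JobHistoryUtils.timestampDirectoryComponent(finishMillis, false);
    // <yyyy/MM/dd>/<first SERIAL_NUMBER_DIRECTORY_DIGITS of the serial>/
    String subdir = JobHistoryUtils.historyLogSubdirectory(jobId, datePart, "%09d");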
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobIndexInfo.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobIndexInfo.java
new file mode 100644
index 0000000..9f83b70
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobIndexInfo.java
@@ -0,0 +1,110 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.jobhistory;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+/**
+ * Maintains information that may be used by the job history
+ * indexing system.
+ */
+public class JobIndexInfo {
+ private long submitTime;
+ private long finishTime;
+ private String user;
+ private String jobName;
+ private JobId jobId;
+ private int numMaps;
+ private int numReduces;
+ private String jobStatus;
+
+ public JobIndexInfo() {
+ }
+
+ public JobIndexInfo(long submitTime, long finishTime, String user,
+ String jobName, JobId jobId, int numMaps, int numReduces, String jobStatus) {
+ this.submitTime = submitTime;
+ this.finishTime = finishTime;
+ this.user = user;
+ this.jobName = jobName;
+ this.jobId = jobId;
+ this.numMaps = numMaps;
+ this.numReduces = numReduces;
+ this.jobStatus = jobStatus;
+ }
+
+ public long getSubmitTime() {
+ return submitTime;
+ }
+ public void setSubmitTime(long submitTime) {
+ this.submitTime = submitTime;
+ }
+ public long getFinishTime() {
+ return finishTime;
+ }
+ public void setFinishTime(long finishTime) {
+ this.finishTime = finishTime;
+ }
+ public String getUser() {
+ return user;
+ }
+ public void setUser(String user) {
+ this.user = user;
+ }
+ public String getJobName() {
+ return jobName;
+ }
+ public void setJobName(String jobName) {
+ this.jobName = jobName;
+ }
+ public JobId getJobId() {
+ return jobId;
+ }
+ public void setJobId(JobId jobId) {
+ this.jobId = jobId;
+ }
+ public int getNumMaps() {
+ return numMaps;
+ }
+ public void setNumMaps(int numMaps) {
+ this.numMaps = numMaps;
+ }
+ public int getNumReduces() {
+ return numReduces;
+ }
+ public void setNumReduces(int numReduces) {
+ this.numReduces = numReduces;
+ }
+ public String getJobStatus() {
+ return jobStatus;
+ }
+ public void setJobStatus(String jobStatus) {
+ this.jobStatus = jobStatus;
+ }
+
+ @Override
+ public String toString() {
+ return "JobIndexInfo [submitTime=" + submitTime + ", finishTime="
+ + finishTime + ", user=" + user + ", jobName=" + jobName + ", jobId="
+ + jobId + ", numMaps=" + numMaps + ", numReduces=" + numReduces
+ + ", jobStatus=" + jobStatus + "]";
+ }
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java
new file mode 100644
index 0000000..2f25070
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java
@@ -0,0 +1,62 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.security.client;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.yarn.proto.MRClientProtocol;
+
+public class ClientHSSecurityInfo extends SecurityInfo {
+
+ @Override
+ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+ if (!protocol
+ .equals(MRClientProtocol.MRClientProtocolService.BlockingInterface.class)) {
+ return null;
+ }
+ return new KerberosInfo() {
+
+ @Override
+ public Class<? extends Annotation> annotationType() {
+ return null;
+ }
+
+ @Override
+ public String serverPrincipal() {
+ return JHConfig.HS_SERVER_PRINCIPAL_KEY;
+ }
+
+ @Override
+ public String clientPrincipal() {
+ return null;
+ }
+ };
+ }
+
+ @Override
+ public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+ return null;
+ }
+
+}
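
For illustration only (not part of this patch): the RPC layer consults a SecurityInfo implementation to learn which configuration key names the Kerberos server principal for a protocol. A hedged sketch of that lookup using the class above; the direct instantiation is for demonstration, since in practice the implementation is discovered through the SecurityInfo provider mechanism.

    Configuration conf = new Configuration();
    SecurityInfo securityInfo = new ClientHSSecurityInfo();
    KerberosInfo kerberos = securityInfo.getKerberosInfo(
        MRClientProtocol.MRClientProtocolService.BlockingInterface.class, conf);
    if (kerberos != null) {
      // serverPrincipal() returns the config key, not the principal itself.
      String principal = conf.get(kerberos.serverPrincipal());
    }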
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
new file mode 100644
index 0000000..988a502
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -0,0 +1,224 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.util;
+
+import static org.apache.hadoop.yarn.util.StringHelper._join;
+import static org.apache.hadoop.yarn.util.StringHelper._split;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.Apps;
+
+/**
+ * Helper class for MR applications
+ */
+public class MRApps extends Apps {
+ public static final String JOB = "job";
+ public static final String TASK = "task";
+ public static final String ATTEMPT = "attempt";
+
+ public static String toString(JobId jid) {
+ return _join(JOB, jid.getAppId().getClusterTimestamp(), jid.getAppId().getId(), jid.getId());
+ }
+
+ public static JobId toJobID(String jid) {
+ Iterator<String> it = _split(jid).iterator();
+ return toJobID(JOB, jid, it);
+ }
+
+ // Mostly useful for parsing task/attempt-id-like strings.
+ public static JobId toJobID(String prefix, String s, Iterator<String> it) {
+ ApplicationId appId = toAppID(prefix, s, it);
+ shouldHaveNext(prefix, s, it);
+ JobId jobId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
+ jobId.setAppId(appId);
+ jobId.setId(Integer.parseInt(it.next()));
+ return jobId;
+ }
+
+ public static String toString(TaskId tid) {
+ return _join("task", tid.getJobId().getAppId().getClusterTimestamp(), tid.getJobId().getAppId().getId(),
+ tid.getJobId().getId(), taskSymbol(tid.getTaskType()), tid.getId());
+ }
+
+ public static TaskId toTaskID(String tid) {
+ Iterator<String> it = _split(tid).iterator();
+ return toTaskID(TASK, tid, it);
+ }
+
+ public static TaskId toTaskID(String prefix, String s, Iterator<String> it) {
+ JobId jid = toJobID(prefix, s, it);
+ shouldHaveNext(prefix, s, it);
+ TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
+ tid.setJobId(jid);
+ tid.setTaskType(taskType(it.next()));
+ shouldHaveNext(prefix, s, it);
+ tid.setId(Integer.parseInt(it.next()));
+ return tid;
+ }
+
+ public static String toString(TaskAttemptId taid) {
+ return _join("attempt", taid.getTaskId().getJobId().getAppId().getClusterTimestamp(),
+ taid.getTaskId().getJobId().getAppId().getId(), taid.getTaskId().getJobId().getId(),
+ taskSymbol(taid.getTaskId().getTaskType()), taid.getTaskId().getId(), taid.getId());
+ }
+
+ public static TaskAttemptId toTaskAttemptID(String taid) {
+ Iterator<String> it = _split(taid).iterator();
+ TaskId tid = toTaskID(ATTEMPT, taid, it);
+ shouldHaveNext(ATTEMPT, taid, it);
+ TaskAttemptId taId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class);
+ taId.setTaskId(tid);
+ taId.setId(Integer.parseInt(it.next()));
+ return taId;
+ }
+
+ public static String taskSymbol(TaskType type) {
+ switch (type) {
+ case MAP: return "m";
+ case REDUCE: return "r";
+ }
+ throw new YarnException("Unknown task type: "+ type.toString());
+ }
+
+ public static enum TaskAttemptStateUI {
+ NEW(
+ new TaskAttemptState[] { TaskAttemptState.NEW,
+ TaskAttemptState.UNASSIGNED, TaskAttemptState.ASSIGNED }),
+ RUNNING(
+ new TaskAttemptState[] { TaskAttemptState.RUNNING,
+ TaskAttemptState.COMMIT_PENDING,
+ TaskAttemptState.SUCCESS_CONTAINER_CLEANUP,
+ TaskAttemptState.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptState.FAIL_TASK_CLEANUP,
+ TaskAttemptState.KILL_CONTAINER_CLEANUP,
+ TaskAttemptState.KILL_TASK_CLEANUP }),
+ SUCCESSFUL(new TaskAttemptState[] { TaskAttemptState.SUCCEEDED}),
+ FAILED(new TaskAttemptState[] { TaskAttemptState.FAILED}),
+ KILLED(new TaskAttemptState[] { TaskAttemptState.KILLED});
+
+ private final List<TaskAttemptState> correspondingStates;
+
+ private TaskAttemptStateUI(TaskAttemptState[] correspondingStates) {
+ this.correspondingStates = Arrays.asList(correspondingStates);
+ }
+
+ public boolean correspondsTo(TaskAttemptState state) {
+ return this.correspondingStates.contains(state);
+ }
+ }
+
+ public static TaskType taskType(String symbol) {
+ // JDK 7 supports switch on strings
+ if (symbol.equals("m")) return TaskType.MAP;
+ if (symbol.equals("r")) return TaskType.REDUCE;
+ throw new YarnException("Unknown task symbol: "+ symbol);
+ }
+
+ public static TaskAttemptStateUI taskAttemptState(String attemptStateStr) {
+ return TaskAttemptStateUI.valueOf(attemptStateStr);
+ }
+
+ public static void setInitialClasspath(
+ Map<String, String> environment) throws IOException {
+ InputStream classpathFileStream = null;
+ try {
+ // Get yarn mapreduce-app classpath from generated classpath
+ // Works if compile time env is same as runtime. Mainly tests.
+ ClassLoader thisClassLoader =
+ Thread.currentThread().getContextClassLoader();
+ String mrAppGeneratedClasspathFile = "mrapp-generated-classpath";
+ classpathFileStream =
+ thisClassLoader.getResourceAsStream(mrAppGeneratedClasspathFile);
+ BufferedReader reader =
+ new BufferedReader(new InputStreamReader(classpathFileStream));
+ String cp = reader.readLine();
+ if (cp != null) {
+ addToClassPath(environment, cp.trim());
+ }
+ // Put the file itself on classpath for tasks.
+ addToClassPath(environment,
+ thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile());
+
+ // If runtime env is different.
+ if (System.getenv().get("YARN_HOME") != null) {
+ ShellCommandExecutor exec =
+ new ShellCommandExecutor(new String[] {
+ System.getenv().get("YARN_HOME") + "/bin/yarn",
+ "classpath" });
+ exec.execute();
+ addToClassPath(environment, exec.getOutput().trim());
+ }
+
+ // Get yarn mapreduce-app classpath
+ if (System.getenv().get("HADOOP_MAPRED_HOME")!= null) {
+ ShellCommandExecutor exec =
+ new ShellCommandExecutor(new String[] {
+ System.getenv().get("HADOOP_MAPRED_HOME") + "/bin/mapred",
+ "classpath" });
+ exec.execute();
+ addToClassPath(environment, exec.getOutput().trim());
+ }
+ } finally {
+ if (classpathFileStream != null) {
+ classpathFileStream.close();
+ }
+ }
+ // TODO: Remove duplicates.
+ }
+
+ public static void addToClassPath(
+ Map<String, String> environment, String fileName) {
+ String classpath = environment.get(CLASSPATH);
+ if (classpath == null) {
+ classpath = fileName;
+ } else {
+ classpath = classpath + ":" + fileName;
+ }
+ environment.put(CLASSPATH, classpath);
+ }
+
+ public static final String CLASSPATH = "CLASSPATH";
+
+ private static final String STAGING_CONSTANT = ".staging";
+ public static Path getStagingAreaDir(Configuration conf, String user) {
+ return new Path(
+ conf.get(MRConstants.APPS_STAGING_DIR_KEY) +
+ Path.SEPARATOR + user + Path.SEPARATOR + STAGING_CONSTANT);
+ }
+}
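
For illustration only (not part of this patch): MRApps renders and parses the familiar job/task/attempt id strings, so toString and the to*ID parsers are inverses of each other. A small round-trip sketch with made-up id values:

    // "attempt_<clusterTimestamp>_<appId>_<jobId>_<m|r>_<taskId>_<attemptId>"
    TaskAttemptId taid = MRApps.toTaskAttemptID("attempt_1_2_3_m_4_5");
    assert taid.getTaskId().getJobId().getAppId().getClusterTimestamp() == 1;
    assert taid.getTaskId().getTaskType() == TaskType.MAP;
    assert "attempt_1_2_3_m_4_5".equals(MRApps.toString(taid));
    // Malformed ids fail fast with a YarnException, e.g.:
    // MRApps.toTaskAttemptID("attempt_0_0_0_x_0_0");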
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRProtoUtils.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRProtoUtils.java
new file mode 100644
index 0000000..5f85a44
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRProtoUtils.java
@@ -0,0 +1,82 @@
+package org.apache.hadoop.mapreduce.v2.util;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobStateProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.PhaseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventStatusProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptStateProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskStateProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskTypeProto;
+
+public class MRProtoUtils {
+
+ /*
+ * JobState
+ */
+ private static final String JOB_STATE_PREFIX = "J_";
+ public static JobStateProto convertToProtoFormat(JobState e) {
+ return JobStateProto.valueOf(JOB_STATE_PREFIX + e.name());
+ }
+ public static JobState convertFromProtoFormat(JobStateProto e) {
+ return JobState.valueOf(e.name().replace(JOB_STATE_PREFIX, ""));
+ }
+
+ /*
+ * Phase
+ */
+ private static final String PHASE_PREFIX = "P_";
+ public static PhaseProto convertToProtoFormat(Phase e) {
+ return PhaseProto.valueOf(PHASE_PREFIX + e.name());
+ }
+ public static Phase convertFromProtoFormat(PhaseProto e) {
+ return Phase.valueOf(e.name().replace(PHASE_PREFIX, ""));
+ }
+
+ /*
+ * TaskAttemptCompletionEventStatus
+ */
+ private static final String TACE_PREFIX = "TACE_";
+ public static TaskAttemptCompletionEventStatusProto convertToProtoFormat(TaskAttemptCompletionEventStatus e) {
+ return TaskAttemptCompletionEventStatusProto.valueOf(TACE_PREFIX + e.name());
+ }
+ public static TaskAttemptCompletionEventStatus convertFromProtoFormat(TaskAttemptCompletionEventStatusProto e) {
+ return TaskAttemptCompletionEventStatus.valueOf(e.name().replace(TACE_PREFIX, ""));
+ }
+
+ /*
+ * TaskAttemptState
+ */
+ private static final String TASK_ATTEMPT_STATE_PREFIX = "TA_";
+ public static TaskAttemptStateProto convertToProtoFormat(TaskAttemptState e) {
+ return TaskAttemptStateProto.valueOf(TASK_ATTEMPT_STATE_PREFIX + e.name());
+ }
+ public static TaskAttemptState convertFromProtoFormat(TaskAttemptStateProto e) {
+ return TaskAttemptState.valueOf(e.name().replace(TASK_ATTEMPT_STATE_PREFIX, ""));
+ }
+
+ /*
+ * TaskState
+ */
+ private static final String TASK_STATE_PREFIX = "TS_";
+ public static TaskStateProto convertToProtoFormat(TaskState e) {
+ return TaskStateProto.valueOf(TASK_STATE_PREFIX + e.name());
+ }
+ public static TaskState convertFromProtoFormat(TaskStateProto e) {
+ return TaskState.valueOf(e.name().replace(TASK_STATE_PREFIX, ""));
+ }
+
+ /*
+ * TaskType
+ */
+ public static TaskTypeProto convertToProtoFormat(TaskType e) {
+ return TaskTypeProto.valueOf(e.name());
+ }
+ public static TaskType convertFromProtoFormat(TaskTypeProto e) {
+ return TaskType.valueOf(e.name());
+ }
+}
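
For illustration only (not part of this patch): the converters above are purely name-based. Each proto enum constant is the record enum's name with a disambiguating prefix (J_, P_, TACE_, TA_, TS_) that convertToProtoFormat adds and convertFromProtoFormat strips:

    TaskStateProto p = MRProtoUtils.convertToProtoFormat(TaskState.RUNNING);
    // p == TaskStateProto.TS_RUNNING
    TaskState s = MRProtoUtils.convertFromProtoFormat(p);
    // s == TaskState.RUNNING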
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto
new file mode 100644
index 0000000..f644260
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto
@@ -0,0 +1,20 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "MRClientProtocol";
+option java_generic_services = true;
+
+import "mr_service_protos.proto";
+
+service MRClientProtocolService {
+ rpc getJobReport (GetJobReportRequestProto) returns (GetJobReportResponseProto);
+ rpc getTaskReport (GetTaskReportRequestProto) returns (GetTaskReportResponseProto);
+ rpc getTaskAttemptReport (GetTaskAttemptReportRequestProto) returns (GetTaskAttemptReportResponseProto);
+ rpc getCounters (GetCountersRequestProto) returns (GetCountersResponseProto);
+ rpc getTaskAttemptCompletionEvents (GetTaskAttemptCompletionEventsRequestProto) returns (GetTaskAttemptCompletionEventsResponseProto);
+ rpc getTaskReports (GetTaskReportsRequestProto) returns (GetTaskReportsResponseProto);
+ rpc getDiagnostics (GetDiagnosticsRequestProto) returns (GetDiagnosticsResponseProto);
+
+ rpc killJob (KillJobRequestProto) returns (KillJobResponseProto);
+ rpc killTask (KillTaskRequestProto) returns (KillTaskResponseProto);
+ rpc killTaskAttempt (KillTaskAttemptRequestProto) returns (KillTaskAttemptResponseProto);
+ rpc failTaskAttempt (FailTaskAttemptRequestProto) returns (FailTaskAttemptResponseProto);
+}
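
For illustration only (not part of this patch): with java_generic_services = true, protoc generates MRClientProtocol.MRClientProtocolService including a BlockingInterface, per standard protobuf 2.x generic services, where each method takes an RpcController plus the request proto and throws ServiceException. A hedged sketch of one call; obtaining the stub is wired up elsewhere in this patch via the RPC client factory:

    // Sketch only; GetJobReportRequestProto/ResponseProto come from
    // MRServiceProtos, ServiceException from com.google.protobuf.
    GetJobReportResponseProto callGetJobReport(
        MRClientProtocol.MRClientProtocolService.BlockingInterface stub,
        GetJobReportRequestProto request) throws ServiceException {
      // A null RpcController is commonly passed when none is needed.
      return stub.getJobReport(null, request);
    }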
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
new file mode 100644
index 0000000..268540b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
@@ -0,0 +1,150 @@
+option java_package = "org.apache.hadoop.mapreduce.v2.proto";
+option java_outer_classname = "MRProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_protos.proto";
+
+enum TaskTypeProto {
+ MAP = 1;
+ REDUCE = 2;
+}
+
+message JobIdProto {
+ optional ApplicationIdProto app_id = 1;
+ optional int32 id = 2;
+}
+
+message TaskIdProto {
+ optional JobIdProto job_id = 1;
+ optional TaskTypeProto task_type = 2;
+ optional int32 id = 3;
+}
+
+message TaskAttemptIdProto {
+ optional TaskIdProto task_id = 1;
+ optional int32 id = 2;
+}
+
+enum TaskStateProto {
+ TS_NEW = 1;
+ TS_SCHEDULED = 2;
+ TS_RUNNING = 3;
+ TS_SUCCEEDED = 4;
+ TS_FAILED = 5;
+ TS_KILL_WAIT = 6;
+ TS_KILLED = 7;
+}
+
+enum PhaseProto {
+ P_STARTING = 1;
+ P_MAP = 2;
+ P_SHUFFLE = 3;
+ P_SORT = 4;
+ P_REDUCE = 5;
+ P_CLEANUP = 6;
+}
+
+message CounterProto {
+ optional string name = 1;
+ optional string display_name = 2;
+ optional int64 value = 3;
+}
+
+message CounterGroupProto {
+ optional string name = 1;
+ optional string display_name = 2;
+ repeated StringCounterMapProto counters = 3;
+}
+
+message CountersProto {
+ repeated StringCounterGroupMapProto counter_groups = 1;
+}
+
+message TaskReportProto {
+ optional TaskIdProto task_id = 1;
+ optional TaskStateProto task_state = 2;
+ optional float progress = 3;
+ optional int64 start_time = 4;
+ optional int64 finish_time = 5;
+ optional CountersProto counters = 6;
+ repeated TaskAttemptIdProto running_attempts = 7;
+ optional TaskAttemptIdProto successful_attempt = 8;
+ repeated string diagnostics = 9;
+}
+
+enum TaskAttemptStateProto {
+ TA_NEW = 1;
+ TA_UNASSIGNED = 2;
+ TA_ASSIGNED = 3;
+ TA_RUNNING = 4;
+ TA_COMMIT_PENDING = 5;
+ TA_SUCCESS_CONTAINER_CLEANUP = 6;
+ TA_SUCCEEDED = 7;
+ TA_FAIL_CONTAINER_CLEANUP = 8;
+ TA_FAIL_TASK_CLEANUP = 9;
+ TA_FAILED = 10;
+ TA_KILL_CONTAINER_CLEANUP = 11;
+ TA_KILL_TASK_CLEANUP = 12;
+ TA_KILLED = 13;
+}
+
+message TaskAttemptReportProto {
+ optional TaskAttemptIdProto task_attempt_id = 1;
+ optional TaskAttemptStateProto task_attempt_state = 2;
+ optional float progress = 3;
+ optional int64 start_time = 4;
+ optional int64 finish_time = 5;
+ optional CountersProto counters = 6;
+ optional string diagnostic_info = 7;
+ optional string state_string = 8;
+ optional PhaseProto phase = 9;
+}
+
+enum JobStateProto {
+ J_NEW = 1;
+ J_INITED = 2;
+ J_RUNNING = 3;
+ J_SUCCEEDED = 4;
+ J_FAILED = 5;
+ J_KILL_WAIT = 6;
+ J_KILLED = 7;
+ J_ERROR = 8;
+}
+
+message JobReportProto {
+ optional JobIdProto job_id = 1;
+ optional JobStateProto job_state = 2;
+ optional float map_progress = 3;
+ optional float reduce_progress = 4;
+ optional float cleanup_progress = 5;
+ optional float setup_progress = 6;
+ optional int64 start_time = 7;
+ optional int64 finish_time = 8;
+}
+
+enum TaskAttemptCompletionEventStatusProto {
+ TACE_FAILED = 1;
+ TACE_KILLED = 2;
+ TACE_SUCCEEDED = 3;
+ TACE_OBSOLETE = 4;
+ TACE_TIPFAILED = 5;
+}
+
+message TaskAttemptCompletionEventProto {
+ optional TaskAttemptIdProto attempt_id = 1;
+ optional TaskAttemptCompletionEventStatusProto status = 2;
+ optional string map_output_server_address = 3;
+ optional int32 attempt_run_time = 4;
+ optional int32 event_id = 5;
+}
+
+message StringCounterMapProto {
+ optional string key = 1;
+ optional CounterProto value = 2;
+}
+
+message StringCounterGroupMapProto {
+ optional string key = 1;
+ optional CounterGroupProto value = 2;
+}
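
For illustration only (not part of this patch): protoc compiles this file into the MRProtos outer class named above, with the usual generated builders. A sketch of constructing a JobIdProto, assuming the yarn_protos.proto generated outer class is YarnProtos and ApplicationIdProto carries cluster_timestamp and id, as used elsewhere in this patch:

    MRProtos.JobIdProto jobId = MRProtos.JobIdProto.newBuilder()
        .setAppId(YarnProtos.ApplicationIdProto.newBuilder()
            .setClusterTimestamp(1L)
            .setId(2)
            .build())
        .setId(3)
        .build();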
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto
new file mode 100644
index 0000000..beb1371
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto
@@ -0,0 +1,83 @@
+option java_package = "org.apache.hadoop.mapreduce.v2.proto";
+option java_outer_classname = "MRServiceProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "mr_protos.proto";
+
+message GetJobReportRequestProto {
+ optional JobIdProto job_id = 1;
+}
+message GetJobReportResponseProto {
+ optional JobReportProto job_report = 1;
+}
+
+message GetTaskReportRequestProto {
+ optional TaskIdProto task_id = 1;
+}
+message GetTaskReportResponseProto {
+ optional TaskReportProto task_report = 1;
+}
+
+message GetTaskAttemptReportRequestProto {
+ optional TaskAttemptIdProto task_attempt_id = 1;
+}
+message GetTaskAttemptReportResponseProto {
+ optional TaskAttemptReportProto task_attempt_report = 1;
+}
+
+message GetCountersRequestProto {
+ optional JobIdProto job_id = 1;
+}
+message GetCountersResponseProto {
+ optional CountersProto counters = 1;
+}
+
+message GetTaskAttemptCompletionEventsRequestProto {
+ optional JobIdProto job_id = 1;
+ optional int32 from_event_id = 2;
+ optional int32 max_events = 3;
+}
+message GetTaskAttemptCompletionEventsResponseProto {
+ repeated TaskAttemptCompletionEventProto completion_events = 1;
+}
+
+message GetTaskReportsRequestProto {
+ optional JobIdProto job_id = 1;
+ optional TaskTypeProto task_type = 2;
+}
+message GetTaskReportsResponseProto {
+ repeated TaskReportProto task_reports = 1;
+}
+
+message GetDiagnosticsRequestProto {
+ optional TaskAttemptIdProto task_attempt_id = 1;
+}
+message GetDiagnosticsResponseProto {
+ repeated string diagnostics = 1;
+}
+
+
+message KillJobRequestProto {
+ optional JobIdProto job_id = 1;
+}
+message KillJobResponseProto {
+}
+
+message KillTaskRequestProto {
+ optional TaskIdProto task_id = 1;
+}
+message KillTaskResponseProto {
+}
+
+message KillTaskAttemptRequestProto {
+ optional TaskAttemptIdProto task_attempt_id = 1;
+}
+message KillTaskAttemptResponseProto {
+}
+
+message FailTaskAttemptRequestProto {
+ optional TaskAttemptIdProto task_attempt_id = 1;
+}
+message FailTaskAttemptResponseProto {
+}
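
For illustration only (not part of this patch): the request/response pairs above mirror the MRClientProtocol methods one-to-one, and the completion-events call is paged through from_event_id and max_events. A sketch of building that request with the generated builders; the id and paging values are made up:

    MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto req =
        MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto.newBuilder()
            .setJobId(MRProtos.JobIdProto.newBuilder().setId(3).build())
            .setFromEventId(0)
            .setMaxEvents(100)
            .build();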
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
new file mode 100644
index 0000000..832eb7f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
@@ -0,0 +1,184 @@
+package org.apache.hadoop.mapreduce.v2;
+
+
+import java.net.InetSocketAddress;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
+import org.junit.Test;
+
+public class TestRPCFactories {
+
+
+
+ @Test
+ public void test() {
+ testPbServerFactory();
+
+ testPbClientFactory();
+ }
+
+
+
+ private void testPbServerFactory() {
+ InetSocketAddress addr = new InetSocketAddress(0);
+ Configuration conf = new Configuration();
+ MRClientProtocol instance = new MRClientProtocolTestImpl();
+ Server server = null;
+ try {
+ server =
+ RpcServerFactoryPBImpl.get().getServer(
+ MRClientProtocol.class, instance, addr, conf, null, 1);
+ server.start();
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to crete server");
+ } finally {
+ server.stop();
+ }
+ }
+
+
+ private void testPbClientFactory() {
+ InetSocketAddress addr = new InetSocketAddress(0);
+ System.err.println(addr.getHostName() + ":" + addr.getPort());
+ Configuration conf = new Configuration();
+ MRClientProtocol instance = new MRClientProtocolTestImpl();
+ Server server = null;
+ try {
+ server =
+ RpcServerFactoryPBImpl.get().getServer(
+ MRClientProtocol.class, instance, addr, conf, null, 1);
+ server.start();
+ System.err.println(server.getListenerAddress());
+ System.err.println(NetUtils.getConnectAddress(server));
+
+ MRClientProtocol client = null;
+ try {
+ client = (MRClientProtocol) RpcClientFactoryPBImpl.get().getClient(MRClientProtocol.class, 1, NetUtils.getConnectAddress(server), conf);
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to crete client");
+ }
+
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to crete server");
+ } finally {
+ server.stop();
+ }
+ }
+
+
+ public class MRClientProtocolTestImpl implements MRClientProtocol {
+
+ @Override
+ public GetJobReportResponse getJobReport(GetJobReportRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public GetTaskAttemptReportResponse getTaskAttemptReport(
+ GetTaskAttemptReportRequest request) throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public GetCountersResponse getCounters(GetCountersRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(
+ GetTaskAttemptCompletionEventsRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public KillJobResponse killJob(KillJobRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public KillTaskResponse killTask(KillTaskRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public KillTaskAttemptResponse killTaskAttempt(
+ KillTaskAttemptRequest request) throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public FailTaskAttemptResponse failTaskAttempt(
+ FailTaskAttemptRequest request) throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRecordFactory.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRecordFactory.java
new file mode 100644
index 0000000..6522b0b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRecordFactory.java
@@ -0,0 +1,37 @@
+package org.apache.hadoop.mapreduce.v2;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.CounterGroupPBImpl;
+import org.junit.Test;
+
+public class TestRecordFactory {
+
+ @Test
+ public void testPbRecordFactory() {
+ RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
+
+ try {
+ CounterGroup response = pbRecordFactory.newRecordInstance(CounterGroup.class);
+ Assert.assertEquals(CounterGroupPBImpl.class, response.getClass());
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to crete record");
+ }
+
+ try {
+ GetCountersRequest response = pbRecordFactory.newRecordInstance(GetCountersRequest.class);
+ Assert.assertEquals(GetCountersRequestPBImpl.class, response.getClass());
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to crete record");
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
new file mode 100644
index 0000000..2c9a701
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -0,0 +1,110 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.util;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestMRApps {
+
+ @Test public void testJobIDtoString() {
+ JobId jid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
+ jid.setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class));
+ assertEquals("job_0_0_0", MRApps.toString(jid));
+ }
+
+ @Test public void testToJobID() {
+ JobId jid = MRApps.toJobID("job_1_1_1");
+ assertEquals(1, jid.getAppId().getClusterTimestamp());
+ assertEquals(1, jid.getAppId().getId());
+ assertEquals(1, jid.getId());
+ }
+
+ @Test(expected=YarnException.class) public void testJobIDShort() {
+ MRApps.toJobID("job_0_0");
+ }
+
+ //TODO_get.set
+ @Test public void testTaskIDtoString() {
+ TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
+ tid.setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
+ tid.getJobId().setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class));
+ tid.setTaskType(TaskType.MAP);
+ TaskType type = tid.getTaskType();
+ System.err.println(type);
+ type = TaskType.REDUCE;
+ System.err.println(type);
+ System.err.println(tid.getTaskType());
+ assertEquals("task_0_0_0_m_0", MRApps.toString(tid));
+ tid.setTaskType(TaskType.REDUCE);
+ assertEquals("task_0_0_0_r_0", MRApps.toString(tid));
+ }
+
+ @Test public void testToTaskID() {
+ TaskId tid = MRApps.toTaskID("task_1_2_3_r_4");
+ assertEquals(1, tid.getJobId().getAppId().getClusterTimestamp());
+ assertEquals(2, tid.getJobId().getAppId().getId());
+ assertEquals(3, tid.getJobId().getId());
+ assertEquals(TaskType.REDUCE, tid.getTaskType());
+ assertEquals(4, tid.getId());
+
+ tid = MRApps.toTaskID("task_1_2_3_m_4");
+ assertEquals(TaskType.MAP, tid.getTaskType());
+ }
+
+ @Test(expected=YarnException.class) public void testTaskIDShort() {
+ MRApps.toTaskID("task_0_0_0_m");
+ }
+
+ @Test(expected=YarnException.class) public void testTaskIDBadType() {
+ MRApps.toTaskID("task_0_0_0_x_0");
+ }
+
+ //TODO_get.set
+ @Test public void testTaskAttemptIDtoString() {
+ TaskAttemptId taid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class);
+ taid.setTaskId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class));
+ taid.getTaskId().setTaskType(TaskType.MAP);
+ taid.getTaskId().setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
+ taid.getTaskId().getJobId().setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class));
+ assertEquals("attempt_0_0_0_m_0_0", MRApps.toString(taid));
+ }
+
+ @Test public void testToTaskAttemptID() {
+ TaskAttemptId taid = MRApps.toTaskAttemptID("attempt_0_1_2_m_3_4");
+ assertEquals(0, taid.getTaskId().getJobId().getAppId().getClusterTimestamp());
+ assertEquals(1, taid.getTaskId().getJobId().getAppId().getId());
+ assertEquals(2, taid.getTaskId().getJobId().getId());
+ assertEquals(3, taid.getTaskId().getId());
+ assertEquals(4, taid.getId());
+ }
+
+ @Test(expected=YarnException.class) public void testTaskAttemptIDShort() {
+ MRApps.toTaskAttemptID("attempt_0_0_0_m_0");
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/pom.xml
new file mode 100644
index 0000000..9eb566a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/pom.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-mapreduce-client</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${hadoop-mapreduce.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-core</artifactId>
+ <name>hadoop-mapreduce-client-core</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <mr.basedir>${project.parent.parent.basedir}</mr.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.avro</groupId>
+ <artifactId>avro-maven-plugin</artifactId>
+ <version>1.5.1</version>
+ <executions>
+ <execution>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>protocol</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/Events.avpr b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/Events.avpr
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
diff --git a/mapreduce/src/java/org/apache/hadoop/filecache/DistributedCache.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/DistributedCache.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/filecache/DistributedCache.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/DistributedCache.java
diff --git a/mapreduce/src/java/org/apache/hadoop/filecache/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/filecache/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/AuditLogger.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/AuditLogger.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/AuditLogger.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/AuditLogger.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
new file mode 100644
index 0000000..026793c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
@@ -0,0 +1,619 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.mapred.IFile.Reader;
+import org.apache.hadoop.mapred.IFile.Writer;
+import org.apache.hadoop.mapred.Merger.Segment;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+
+/**
+ * <code>BackupStore</code> is an utility class that is used to support
+ * the mark-reset functionality of values iterator
+ *
+ * <p>It has two caches - a memory cache and a file cache where values are
+ * stored as they are iterated, after a mark. On reset, values are retrieved
+ * from these caches. The framework moves from the memory cache to the
+ * file cache when the memory cache becomes full.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class BackupStore<K,V> {
+
+ private static final Log LOG = LogFactory.getLog(BackupStore.class.getName());
+ private static final int MAX_VINT_SIZE = 9;
+ private static final int EOF_MARKER_SIZE = 2 * MAX_VINT_SIZE;
+ private final TaskAttemptID tid;
+
+ private MemoryCache memCache;
+ private FileCache fileCache;
+
+ List<Segment<K,V>> segmentList = new LinkedList<Segment<K,V>>();
+ private int readSegmentIndex = 0;
+ private int firstSegmentOffset = 0;
+
+ private int currentKVOffset = 0;
+ private int nextKVOffset = -1;
+
+ private DataInputBuffer currentKey = null;
+ private DataInputBuffer currentValue = new DataInputBuffer();
+ private DataInputBuffer currentDiskValue = new DataInputBuffer();
+
+ private boolean hasMore = false;
+ private boolean inReset = false;
+ private boolean clearMarkFlag = false;
+ private boolean lastSegmentEOF = false;
+
+ public BackupStore(Configuration conf, TaskAttemptID taskid)
+ throws IOException {
+
+ final float bufferPercent =
+ conf.getFloat(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT, 0f);
+
+ if (bufferPercent > 1.0 || bufferPercent < 0.0) {
+ throw new IOException(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT +
+ " must be between 0.0 and 1.0, but was " + bufferPercent);
+ }
+
+ int maxSize = (int)Math.min(
+ Runtime.getRuntime().maxMemory() * bufferPercent, Integer.MAX_VALUE);
+
+ // Support an absolute size also.
+ int tmp = conf.getInt(JobContext.REDUCE_MARKRESET_BUFFER_SIZE, 0);
+ if (tmp > 0) {
+ maxSize = tmp;
+ }
+
+ memCache = new MemoryCache(maxSize);
+ fileCache = new FileCache(conf);
+ tid = taskid;
+
+ LOG.info("Created a new BackupStore with a memory of " + maxSize);
+
+ }
+
+ /**
+ * Write the given K,V to the cache.
+ * Write to memcache if space is available, else write to the filecache
+ * @param key
+ * @param value
+ * @throws IOException
+ */
+ public void write(DataInputBuffer key, DataInputBuffer value)
+ throws IOException {
+
+ assert (key != null && value != null);
+
+ if (fileCache.isActive()) {
+ fileCache.write(key, value);
+ return;
+ }
+
+ if (memCache.reserveSpace(key, value)) {
+ memCache.write(key, value);
+ } else {
+ fileCache.activate();
+ fileCache.write(key, value);
+ }
+ }
+
+ public void mark() throws IOException {
+
+ // We read one KV pair in advance in hasNext.
+ // If hasNext has read the next KV pair from a new segment, but the
+ // user has not called next() for that KV, then reset the readSegmentIndex
+ // to the previous segment
+
+ if (nextKVOffset == 0) {
+ assert (readSegmentIndex != 0);
+ assert (currentKVOffset != 0);
+ readSegmentIndex --;
+ }
+
+ // just drop segments before the current active segment
+
+ int i = 0;
+ Iterator<Segment<K,V>> itr = segmentList.iterator();
+ while (itr.hasNext()) {
+ Segment<K,V> s = itr.next();
+ if (i == readSegmentIndex) {
+ break;
+ }
+ s.close();
+ itr.remove();
+ i++;
+ LOG.debug("Dropping a segment");
+ }
+
+ // FirstSegmentOffset is the offset in the current segment from where we
+ // need to start reading on the next reset
+
+ firstSegmentOffset = currentKVOffset;
+ readSegmentIndex = 0;
+
+ LOG.debug("Setting the FirsSegmentOffset to " + currentKVOffset);
+ }
+
+ public void reset() throws IOException {
+
+ // Create a new segment for the previously written records only if we
+ // are not already in the reset mode
+
+ if (!inReset) {
+ if (fileCache.isActive) {
+ fileCache.createInDiskSegment();
+ } else {
+ memCache.createInMemorySegment();
+ }
+ }
+
+ inReset = true;
+
+ // Reset the segments to the correct position from where the next read
+ // should begin.
+ for (int i = 0; i < segmentList.size(); i++) {
+ Segment<K,V> s = segmentList.get(i);
+ if (s.inMemory()) {
+ int offset = (i == 0) ? firstSegmentOffset : 0;
+ s.getReader().reset(offset);
+ } else {
+ s.closeReader();
+ if (i == 0) {
+ s.reinitReader(firstSegmentOffset);
+ s.getReader().disableChecksumValidation();
+ }
+ }
+ }
+
+ currentKVOffset = firstSegmentOffset;
+ nextKVOffset = -1;
+ readSegmentIndex = 0;
+ hasMore = false;
+ lastSegmentEOF = false;
+
+ LOG.debug("Reset - First segment offset is " + firstSegmentOffset +
+ " Segment List Size is " + segmentList.size());
+ }
+
+ public boolean hasNext() throws IOException {
+
+ if (lastSegmentEOF) {
+ return false;
+ }
+
+ // We read the next KV from the cache to decide if there is any left.
+ // Since hasNext can be called several times before the actual call to
+ // next(), we use hasMore to avoid extra reads. hasMore is set to false
+ // when the user actually consumes this record in next()
+
+ if (hasMore) {
+ return true;
+ }
+
+ Segment<K,V> seg = segmentList.get(readSegmentIndex);
+ // Mark the current position. This would be set to currentKVOffset
+ // when the user consumes this record in next().
+ nextKVOffset = (int) seg.getActualPosition();
+ if (seg.nextRawKey()) {
+ currentKey = seg.getKey();
+ seg.getValue(currentValue);
+ hasMore = true;
+ return true;
+ } else {
+ if (!seg.inMemory()) {
+ seg.closeReader();
+ }
+ }
+
+ // If this is the last segment, mark the lastSegmentEOF flag and return
+ if (readSegmentIndex == segmentList.size() - 1) {
+ nextKVOffset = -1;
+ lastSegmentEOF = true;
+ return false;
+ }
+
+ nextKVOffset = 0;
+ readSegmentIndex ++;
+
+ Segment<K,V> nextSegment = segmentList.get(readSegmentIndex);
+
+ // We possibly are moving from a memory segment to a disk segment.
+ // Reset so that we do not corrupt the in-memory segment buffer.
+ // See HADOOP-5494
+
+ if (!nextSegment.inMemory()) {
+ currentValue.reset(currentDiskValue.getData(),
+ currentDiskValue.getLength());
+ nextSegment.init(null);
+ }
+
+ if (nextSegment.nextRawKey()) {
+ currentKey = nextSegment.getKey();
+ nextSegment.getValue(currentValue);
+ hasMore = true;
+ return true;
+ } else {
+ throw new IOException("New segment did not have even one K/V");
+ }
+ }
+
+ public void next() throws IOException {
+ if (!hasNext()) {
+ throw new NoSuchElementException("iterate past last value");
+ }
+ // Reset hasMore. See comment in hasNext()
+ hasMore = false;
+ currentKVOffset = nextKVOffset;
+ nextKVOffset = -1;
+ }
+
+ public DataInputBuffer nextValue() {
+ return currentValue;
+ }
+
+ public DataInputBuffer nextKey() {
+ return currentKey;
+ }
+
+ public void reinitialize() throws IOException {
+ if (segmentList.size() != 0) {
+ clearSegmentList();
+ }
+ memCache.reinitialize(true);
+ fileCache.reinitialize();
+ readSegmentIndex = firstSegmentOffset = 0;
+ currentKVOffset = 0;
+ nextKVOffset = -1;
+ hasMore = inReset = clearMarkFlag = false;
+ }
+
+ /**
+ * This method is called by the ValuesIterator when mark is called
+ * outside of a reset zone.
+ */
+ public void exitResetMode() throws IOException {
+ inReset = false;
+ if (clearMarkFlag) {
+ // If a flag was set to clear mark, do the reinit now.
+ // See clearMark()
+ reinitialize();
+ return;
+ }
+ if (!fileCache.isActive) {
+ memCache.reinitialize(false);
+ }
+ }
+
+ /** For writing the first key and value bytes directly from the
+ * value iterators, returns the current underlying output stream
+ * @param length The length of the impending write
+ */
+ public DataOutputStream getOutputStream(int length) throws IOException {
+ if (memCache.reserveSpace(length)) {
+ return memCache.dataOut;
+ } else {
+ fileCache.activate();
+ return fileCache.writer.getOutputStream();
+ }
+ }
+
+ /** This method is called by the valueIterators after writing the first
+ * key and value bytes to the BackupStore
+ * @param length
+ */
+ public void updateCounters(int length) {
+ if (fileCache.isActive) {
+ fileCache.writer.updateCountersForExternalAppend(length);
+ } else {
+ memCache.usedSize += length;
+ }
+ }
+
+ public void clearMark() throws IOException {
+ if (inReset) {
+ // If we are in the reset mode, we just mark a flag and come out
+ // The actual re initialization would be done when we exit the reset
+ // mode
+ clearMarkFlag = true;
+ } else {
+ reinitialize();
+ }
+ }
+
+ private void clearSegmentList() throws IOException {
+ for (Segment<K,V> segment: segmentList) {
+ long len = segment.getLength();
+ segment.close();
+ if (segment.inMemory()) {
+ memCache.unreserve(len);
+ }
+ }
+ segmentList.clear();
+ }
+
+ class MemoryCache {
+ private DataOutputBuffer dataOut;
+ private int blockSize;
+ private int usedSize;
+ private final BackupRamManager ramManager;
+
+ // Memory cache is made up of blocks.
+ private int defaultBlockSize = 1024 * 1024;
+
+ public MemoryCache(int maxSize) {
+ ramManager = new BackupRamManager(maxSize);
+ if (maxSize < defaultBlockSize) {
+ defaultBlockSize = maxSize;
+ }
+ }
+
+ public void unreserve(long len) {
+ ramManager.unreserve((int)len);
+ }
+
+ /**
+ * Re-initialize the memory cache.
+ *
+ * @param clearAll If true, re-initialize the ramManager also.
+ */
+ void reinitialize(boolean clearAll) {
+ if (clearAll) {
+ ramManager.reinitialize();
+ }
+ int allocatedSize = createNewMemoryBlock(defaultBlockSize,
+ defaultBlockSize);
+ assert(allocatedSize == defaultBlockSize || allocatedSize == 0);
+ LOG.debug("Created a new mem block of " + allocatedSize);
+ }
+
+ private int createNewMemoryBlock(int requestedSize, int minSize) {
+ int allocatedSize = ramManager.reserve(requestedSize, minSize);
+ usedSize = 0;
+ if (allocatedSize == 0) {
+ dataOut = null;
+ blockSize = 0;
+ } else {
+ dataOut = new DataOutputBuffer(allocatedSize);
+ blockSize = allocatedSize;
+ }
+ return allocatedSize;
+ }
+
+ /**
+ * This method determines if there is enough space left in the
+ * memory cache to write the requested length plus space for
+ * subsequent EOF markers.
+ * @param length
+ * @return true if enough space is available
+ */
+ boolean reserveSpace(int length) throws IOException {
+ int availableSize = blockSize - usedSize;
+ if (availableSize >= length + EOF_MARKER_SIZE) {
+ return true;
+ }
+ // Not enough available. Close this block
+ assert (!inReset);
+
+ createInMemorySegment();
+
+ // Create a new block
+ int tmp = Math.max(length + EOF_MARKER_SIZE, defaultBlockSize);
+ availableSize = createNewMemoryBlock(tmp,
+ (length + EOF_MARKER_SIZE));
+
+ return availableSize != 0;
+ }
+
+ boolean reserveSpace(DataInputBuffer key, DataInputBuffer value)
+ throws IOException {
+ int keyLength = key.getLength() - key.getPosition();
+ int valueLength = value.getLength() - value.getPosition();
+
+ int requestedSize = keyLength + valueLength +
+ WritableUtils.getVIntSize(keyLength) +
+ WritableUtils.getVIntSize(valueLength);
+ return reserveSpace(requestedSize);
+ }
+
+ /**
+ * Write the key and value to the cache in the IFile format
+ * @param key
+ * @param value
+ * @throws IOException
+ */
+ public void write(DataInputBuffer key, DataInputBuffer value)
+ throws IOException {
+ int keyLength = key.getLength() - key.getPosition();
+ int valueLength = value.getLength() - value.getPosition();
+ WritableUtils.writeVInt(dataOut, keyLength);
+ WritableUtils.writeVInt(dataOut, valueLength);
+ dataOut.write(key.getData(), key.getPosition(), keyLength);
+ dataOut.write(value.getData(), value.getPosition(), valueLength);
+ usedSize += keyLength + valueLength +
+ WritableUtils.getVIntSize(keyLength) +
+ WritableUtils.getVIntSize(valueLength);
+ LOG.debug("ID: " + segmentList.size() + " WRITE TO MEM");
+ }
+
+ /**
+ * This method creates a memory segment from the existing buffer
+ * @throws IOException
+ */
+ void createInMemorySegment() throws IOException {
+
+ // If nothing was written in this block because the record size
+ // was greater than the allocated block size, just return.
+ if (usedSize == 0) {
+ ramManager.unreserve(blockSize);
+ return;
+ }
+
+ // reserveSpace() would have ensured that there is enough space
+ // left for the EOF markers.
+ assert ((blockSize - usedSize) >= EOF_MARKER_SIZE);
+
+ WritableUtils.writeVInt(dataOut, IFile.EOF_MARKER);
+ WritableUtils.writeVInt(dataOut, IFile.EOF_MARKER);
+
+ usedSize += EOF_MARKER_SIZE;
+
+ ramManager.unreserve(blockSize - usedSize);
+
+ Reader<K, V> reader =
+ new org.apache.hadoop.mapreduce.task.reduce.InMemoryReader<K, V>(null,
+ (org.apache.hadoop.mapred.TaskAttemptID) tid,
+ dataOut.getData(), 0, usedSize);
+ Segment<K, V> segment = new Segment<K, V>(reader, false);
+ segmentList.add(segment);
+ LOG.debug("Added Memory Segment to List. List Size is " +
+ segmentList.size());
+ }
+ }
+
+ class FileCache {
+ private LocalDirAllocator lDirAlloc;
+ private final Configuration conf;
+ private final FileSystem fs;
+ private boolean isActive = false;
+
+ private Path file = null;
+ private IFile.Writer<K,V> writer = null;
+ private int spillNumber = 0;
+
+ public FileCache(Configuration conf)
+ throws IOException {
+ this.conf = conf;
+ this.fs = FileSystem.getLocal(conf);
+ this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
+ }
+
+ void write(DataInputBuffer key, DataInputBuffer value)
+ throws IOException {
+ if (writer == null) {
+ // If spillNumber is 0, we should have called activate and not
+ // come here at all
+ assert (spillNumber != 0);
+ writer = createSpillFile();
+ }
+ writer.append(key, value);
+ LOG.debug("ID: " + segmentList.size() + " WRITE TO DISK");
+ }
+
+ void reinitialize() {
+ spillNumber = 0;
+ writer = null;
+ isActive = false;
+ }
+
+ void activate() throws IOException {
+ isActive = true;
+ writer = createSpillFile();
+ }
+
+ void createInDiskSegment() throws IOException {
+ assert (writer != null);
+ writer.close();
+ Segment<K,V> s = new Segment<K, V>(conf, fs, file, null, true);
+ writer = null;
+ segmentList.add(s);
+ LOG.debug("Disk Segment added to List. Size is " + segmentList.size());
+ }
+
+ boolean isActive() { return isActive; }
+
+ private Writer<K,V> createSpillFile() throws IOException {
+ Path tmp =
+ new Path(Constants.OUTPUT + "/backup_" + tid.getId() + "_"
+ + (spillNumber++) + ".out");
+
+ LOG.info("Created file: " + tmp);
+
+ file = lDirAlloc.getLocalPathForWrite(tmp.toUri().getPath(),
+ -1, conf);
+ return new Writer<K, V>(conf, fs, file);
+ }
+ }
+
+ static class BackupRamManager implements RamManager {
+
+ private int availableSize = 0;
+ private final int maxSize;
+
+ public BackupRamManager(int size) {
+ availableSize = maxSize = size;
+ }
+
+ public boolean reserve(int requestedSize, InputStream in) {
+ // Not used
+ LOG.warn("Reserve(int, InputStream) not supported by BackupRamManager");
+ return false;
+ }
+
+ int reserve(int requestedSize) {
+ if (availableSize == 0) {
+ return 0;
+ }
+ int reservedSize = Math.min(requestedSize, availableSize);
+ availableSize -= reservedSize;
+ LOG.debug("Reserving: " + reservedSize + " Requested: " + requestedSize);
+ return reservedSize;
+ }
+
+ int reserve(int requestedSize, int minSize) {
+ if (availableSize < minSize) {
+ LOG.debug("No Space available. Available: " + availableSize +
+ " MinSize: " + minSize);
+ return 0;
+ } else {
+ return reserve(requestedSize);
+ }
+ }
+
+ public void unreserve(int requestedSize) {
+ availableSize += requestedSize;
+ LOG.debug("Unreserving: " + requestedSize +
+ ". Available: " + availableSize);
+ }
+
+ void reinitialize() {
+ availableSize = maxSize;
+ }
+ }
+}
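
For illustration only (not part of this patch): a hedged sketch of the mark/reset protocol BackupStore implements for the reduce-side values iterator, written as a helper method so the inputs are explicit. Text is a stand-in for the real key/value types; conf, tid, and the buffers would be supplied by the framework in real use.

    // Sketch only; imports mirror those of BackupStore.java above,
    // plus org.apache.hadoop.io.Text.
    static void markResetDemo(Configuration conf, TaskAttemptID tid,
        DataInputBuffer key, DataInputBuffer value) throws IOException {
      BackupStore<Text, Text> store = new BackupStore<Text, Text>(conf, tid);
      store.mark();               // remember the current position
      store.write(key, value);    // records written after the mark are buffered
      store.reset();              // rewind to the mark
      while (store.hasNext()) {   // hasNext() pre-reads one record
        store.next();
        DataInputBuffer v = store.nextValue();  // replayed value
      }
      store.clearMark();          // discard the buffered data when done
    }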
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/BufferSorter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BufferSorter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/BufferSorter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BufferSorter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/CleanupQueue.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/CleanupQueue.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Clock.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Clock.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Clock.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Clock.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ClusterStatus.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ClusterStatus.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/ClusterStatus.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ClusterStatus.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Constants.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Constants.java
new file mode 100644
index 0000000..e8a202e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Constants.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+public class Constants {
+ static final String OUTPUT = "output";
+ public static final String HADOOP_WORK_DIR = "HADOOP_WORK_DIR";
+ public static final String JOBFILE = "job.xml";
+ public static final String STDOUT_LOGFILE_ENV = "STDOUT_LOGFILE_ENV";
+ public static final String STDERR_LOGFILE_ENV = "STDERR_LOGFILE_ENV";
+}
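The *_LOGFILE_ENV constants name environment variables rather than file paths, as does HADOOP_WORK_DIR. A hedged sketch of how a child task process might resolve them (illustrative; not part of this patch):

    String stdoutLog = System.getenv(Constants.STDOUT_LOGFILE_ENV);
    String stderrLog = System.getenv(Constants.STDERR_LOGFILE_ENV);
    String workDir = System.getenv(Constants.HADOOP_WORK_DIR);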
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Counters.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Counters.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/CumulativePeriodicStats.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CumulativePeriodicStats.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/CumulativePeriodicStats.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CumulativePeriodicStats.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/FileInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/FileOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/FileOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/FileSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/FileSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ID.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ID.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/ID.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ID.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/IFile.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/IFile.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/IFileInputStream.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFileInputStream.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/IFileInputStream.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFileInputStream.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/IFileOutputStream.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFileOutputStream.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/IFileOutputStream.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFileOutputStream.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/IndexCache.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/IndexCache.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/InputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/InputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/InputSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InputSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/InputSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InputSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/InvalidFileTypeException.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidFileTypeException.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/InvalidFileTypeException.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidFileTypeException.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/InvalidInputException.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidInputException.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/InvalidInputException.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidInputException.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/InvalidJobConfException.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidJobConfException.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/InvalidJobConfException.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidJobConfException.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JVMId.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JVMId.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JVMId.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JVMId.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java
new file mode 100644
index 0000000..e1ce1b9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+
+@InterfaceAudience.Private
+public class JobACLsManager {
+
+ Configuration conf;
+
+ public JobACLsManager(Configuration conf) {
+ this.conf = conf;
+ }
+
+ public boolean areACLsEnabled() {
+ return conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
+ }
+
+ /**
+   * Construct the jobACLs from the configuration so that they can be kept in
+   * memory. If authorization is disabled, nothing is constructed and an empty
+   * map is returned.
+ *
+ * @return JobACL to AccessControlList map.
+ */
+ public Map<JobACL, AccessControlList> constructJobACLs(Configuration conf) {
+
+ Map<JobACL, AccessControlList> acls =
+ new HashMap<JobACL, AccessControlList>();
+
+ // Don't construct anything if authorization is disabled.
+ if (!areACLsEnabled()) {
+ return acls;
+ }
+
+ for (JobACL aclName : JobACL.values()) {
+ String aclConfigName = aclName.getAclName();
+ String aclConfigured = conf.get(aclConfigName);
+ if (aclConfigured == null) {
+        // If no ACL is configured for this operation, grant access to no one
+        // beyond the job owner and cluster administrators: a single space
+        // yields an AccessControlList that matches no users or groups.
+ aclConfigured = " ";
+ }
+ acls.put(aclName, new AccessControlList(aclConfigured));
+ }
+ return acls;
+ }
+
+ /**
+   * If authorization is enabled, checks whether the user (in the callerUGI)
+   * is authorized to perform the operation specified by 'jobOperation' on
+   * the job by checking if the user is the jobOwner or is part of the job
+   * ACL for the specific job operation.
+   * <ul>
+   * <li>The owner of the job can do any operation on the job</li>
+   * <li>For all other users/groups the job ACLs are checked</li>
+   * </ul>
+   * @param callerUGI the user attempting the operation
+   * @param jobOperation the operation being performed
+   * @param jobOwner the user who submitted the job
+   * @param jobACL the ACL configured for jobOperation
+   * @return true if the operation is allowed, or if ACLs are disabled
+ */
+ public boolean checkAccess(UserGroupInformation callerUGI,
+ JobACL jobOperation, String jobOwner, AccessControlList jobACL) {
+
+ String user = callerUGI.getShortUserName();
+ if (!areACLsEnabled()) {
+ return true;
+ }
+
+ // Allow Job-owner for any operation on the job
+ if (user.equals(jobOwner)
+ || jobACL.isUserAllowed(callerUGI)) {
+ return true;
+ }
+
+ return false;
+ }
+}
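constructJobACLs and checkAccess are meant to be used together: build the ACL map once per job, then consult it on each operation. A minimal sketch, assuming ACLs are enabled; the user, group, and owner names are illustrative:

    Configuration conf = new Configuration();
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
    // ACL format: comma-separated users, a space, then comma-separated groups.
    conf.set(JobACL.VIEW_JOB.getAclName(), "alice,bob analysts");

    JobACLsManager aclsManager = new JobACLsManager(conf);
    Map<JobACL, AccessControlList> acls = aclsManager.constructJobACLs(conf);

    UserGroupInformation caller = UserGroupInformation.createRemoteUser("alice");
    boolean canView = aclsManager.checkAccess(caller, JobACL.VIEW_JOB,
        "someowner", acls.get(JobACL.VIEW_JOB));  // true: alice is in the view ACL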
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobClient.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobClient.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobConf.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobConfigurable.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConfigurable.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobConfigurable.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConfigurable.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobContext.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobContext.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobContext.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobContextImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobContextImpl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobContextImpl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobContextImpl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobEndNotifier.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobEndNotifier.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobEndNotifier.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobEndNotifier.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobID.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobID.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobID.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobID.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobInfo.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobInfo.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobInfo.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobInfo.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobPriority.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobPriority.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobPriority.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobPriority.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobProfile.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobProfile.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobProfile.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobProfile.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobQueueInfo.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueInfo.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobQueueInfo.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueInfo.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobStatus.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobStatus.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JvmContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JvmContext.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JvmContext.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JvmContext.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JvmTask.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JvmTask.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JvmTask.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JvmTask.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/KeyValueTextInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/KeyValueTextInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/KeyValueTextInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/KeyValueTextInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/LineRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LineRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/LineRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LineRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MRConstants.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MRConstants.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java
new file mode 100644
index 0000000..e81e11d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRConfig;
+
+/**
+ * Manipulate the working area for the transient store for maps and reduces.
+ *
+ * This class is used by map and reduce tasks to identify the directories that
+ * they need to write to/read from for intermediate files. These methods are
+ * invoked from the Child JVM running the Task.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class MROutputFiles extends MapOutputFile {
+
+ private LocalDirAllocator lDirAlloc =
+ new LocalDirAllocator(MRConfig.LOCAL_DIR);
+
+ public MROutputFiles() {
+ }
+
+ /**
+   * Return the path to the local map output file created earlier.
+ *
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getOutputFile()
+ throws IOException {
+ return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + Path.SEPARATOR
+ + MAP_OUTPUT_FILENAME_STRING, getConf());
+ }
+
+ /**
+ * Create a local map output file name.
+ *
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getOutputFileForWrite(long size)
+ throws IOException {
+ return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + Path.SEPARATOR
+ + MAP_OUTPUT_FILENAME_STRING, size, getConf());
+ }
+
+ /**
+ * Create a local map output file name on the same volume.
+ */
+ @Override
+ public Path getOutputFileForWriteInVolume(Path existing) {
+ return new Path(existing.getParent(), MAP_OUTPUT_FILENAME_STRING);
+ }
+
+ /**
+   * Return the path to a local map output index file created earlier.
+ *
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getOutputIndexFile()
+ throws IOException {
+ return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + Path.SEPARATOR
+ + MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
+ getConf());
+ }
+
+ /**
+ * Create a local map output index file name.
+ *
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getOutputIndexFileForWrite(long size)
+ throws IOException {
+ return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + Path.SEPARATOR
+ + MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
+ size, getConf());
+ }
+
+ /**
+ * Create a local map output index file name on the same volume.
+ */
+ @Override
+ public Path getOutputIndexFileForWriteInVolume(Path existing) {
+ return new Path(existing.getParent(),
+ MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING);
+ }
+
+ /**
+ * Return a local map spill file created earlier.
+ *
+ * @param spillNumber the number
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getSpillFile(int spillNumber)
+ throws IOException {
+ return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + "/spill"
+ + spillNumber + ".out", getConf());
+ }
+
+ /**
+ * Create a local map spill file name.
+ *
+ * @param spillNumber the number
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getSpillFileForWrite(int spillNumber, long size)
+ throws IOException {
+ return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + "/spill"
+ + spillNumber + ".out", size, getConf());
+ }
+
+ /**
+   * Return a local map spill index file created earlier.
+ *
+ * @param spillNumber the number
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getSpillIndexFile(int spillNumber)
+ throws IOException {
+ return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + "/spill"
+ + spillNumber + ".out.index", getConf());
+ }
+
+ /**
+ * Create a local map spill index file name.
+ *
+ * @param spillNumber the number
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getSpillIndexFileForWrite(int spillNumber, long size)
+ throws IOException {
+ return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + "/spill"
+ + spillNumber + ".out.index", size, getConf());
+ }
+
+ /**
+   * Return a local reduce input file created earlier.
+ *
+ * @param mapId a map task id
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getInputFile(int mapId)
+ throws IOException {
+ return lDirAlloc.getLocalPathToRead(String.format(
+ REDUCE_INPUT_FILE_FORMAT_STRING, Constants.OUTPUT, Integer
+ .valueOf(mapId)), getConf());
+ }
+
+ /**
+ * Create a local reduce input file name.
+ *
+ * @param mapId a map task id
+ * @param size the size of the file
+ * @return path
+ * @throws IOException
+ */
+ @Override
+ public Path getInputFileForWrite(org.apache.hadoop.mapreduce.TaskID mapId,
+ long size)
+ throws IOException {
+ return lDirAlloc.getLocalPathForWrite(String.format(
+ REDUCE_INPUT_FILE_FORMAT_STRING, Constants.OUTPUT, mapId.getId()),
+ size, getConf());
+ }
+
+ /** Removes all of the files related to a task. */
+ @Override
+ public void removeAll()
+ throws IOException {
+ ((JobConf)getConf()).deleteLocalFiles(Constants.OUTPUT);
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ if (!(conf instanceof JobConf)) {
+ conf = new JobConf(conf);
+ }
+ super.setConf(conf);
+ }
+
+}
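All of the methods above resolve logical names through LocalDirAllocator against the configured local directories, so callers never construct paths by hand. A minimal write-then-read round trip for a spill file (spill number and size are illustrative):

    JobConf job = new JobConf();
    MROutputFiles outputFiles = new MROutputFiles();
    outputFiles.setConf(job);  // a plain Configuration is wrapped in a JobConf

    // Ask the allocator for a writable location with ~1 MB of headroom for spill 0,
    Path forWrite = outputFiles.getSpillFileForWrite(0, 1 << 20);
    // then resolve the same logical name when reading it back.
    Path forRead = outputFiles.getSpillFile(0);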
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapFileOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapFileOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MapFileOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapFileOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapOutputFile.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapOutputFile.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MapOutputFile.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapOutputFile.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapReduceBase.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapReduceBase.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MapReduceBase.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapReduceBase.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapRunnable.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunnable.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MapRunnable.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunnable.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapRunner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MapRunner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunner.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
new file mode 100644
index 0000000..97ebfa1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -0,0 +1,1862 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.IntBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.io.serializer.Deserializer;
+import org.apache.hadoop.io.serializer.SerializationFactory;
+import org.apache.hadoop.io.serializer.Serializer;
+import org.apache.hadoop.mapred.IFile.Writer;
+import org.apache.hadoop.mapred.Merger.Segment;
+import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskCounter;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter;
+import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
+import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex;
+import org.apache.hadoop.mapreduce.task.MapContextImpl;
+import org.apache.hadoop.util.IndexedSortable;
+import org.apache.hadoop.util.IndexedSorter;
+import org.apache.hadoop.util.Progress;
+import org.apache.hadoop.util.QuickSort;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+
+/** A Map task. */
+class MapTask extends Task {
+ /**
+ * The size of each record in the index file for the map-outputs.
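+   * Each record is three 8-byte longs: the partition's start offset in the
+   * map output file, its raw length, and its compressed (on-disk) length.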
+ */
+ public static final int MAP_OUTPUT_INDEX_RECORD_LENGTH = 24;
+
+ private TaskSplitIndex splitMetaInfo = new TaskSplitIndex();
+ private final static int APPROX_HEADER_LENGTH = 150;
+
+ private static final Log LOG = LogFactory.getLog(MapTask.class.getName());
+
+ private Progress mapPhase;
+ private Progress sortPhase;
+
+ { // set phase for this task
+ setPhase(TaskStatus.Phase.MAP);
+ getProgress().setStatus("map");
+ }
+
+ public MapTask() {
+ super();
+ }
+
+ public MapTask(String jobFile, TaskAttemptID taskId,
+ int partition, TaskSplitIndex splitIndex,
+ int numSlotsRequired) {
+ super(jobFile, taskId, partition, numSlotsRequired);
+ this.splitMetaInfo = splitIndex;
+ }
+
+ @Override
+ public boolean isMapTask() {
+ return true;
+ }
+
+ @Override
+ public void localizeConfiguration(JobConf conf)
+ throws IOException {
+ super.localizeConfiguration(conf);
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ if (isMapOrReduce()) {
+ splitMetaInfo.write(out);
+ splitMetaInfo = null;
+ }
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ if (isMapOrReduce()) {
+ splitMetaInfo.readFields(in);
+ }
+ }
+
+  /**
+   * This class wraps the user's record reader to update the counters and
+   * progress as records are read.
+   * @param <K> input key type
+   * @param <V> input value type
+   */
+ class TrackedRecordReader<K, V>
+ implements RecordReader<K,V> {
+ private RecordReader<K,V> rawIn;
+ private Counters.Counter fileInputByteCounter;
+ private Counters.Counter inputRecordCounter;
+ private TaskReporter reporter;
+ private long bytesInPrev = -1;
+ private long bytesInCurr = -1;
+ private final Statistics fsStats;
+
+ TrackedRecordReader(TaskReporter reporter, JobConf job)
+ throws IOException{
+ inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
+ fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
+ this.reporter = reporter;
+
+ Statistics matchedStats = null;
+ if (this.reporter.getInputSplit() instanceof FileSplit) {
+ matchedStats = getFsStatistics(((FileSplit) this.reporter
+ .getInputSplit()).getPath(), job);
+ }
+ fsStats = matchedStats;
+
+ bytesInPrev = getInputBytes(fsStats);
+ rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
+ job, reporter);
+ bytesInCurr = getInputBytes(fsStats);
+ fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
+ }
+
+ public K createKey() {
+ return rawIn.createKey();
+ }
+
+ public V createValue() {
+ return rawIn.createValue();
+ }
+
+ public synchronized boolean next(K key, V value)
+ throws IOException {
+ boolean ret = moveToNext(key, value);
+ if (ret) {
+ incrCounters();
+ }
+ return ret;
+ }
+
+ protected void incrCounters() {
+ inputRecordCounter.increment(1);
+ }
+
+ protected synchronized boolean moveToNext(K key, V value)
+ throws IOException {
+ bytesInPrev = getInputBytes(fsStats);
+ boolean ret = rawIn.next(key, value);
+ bytesInCurr = getInputBytes(fsStats);
+ fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
+ reporter.setProgress(getProgress());
+ return ret;
+ }
+
+ public long getPos() throws IOException { return rawIn.getPos(); }
+
+ public void close() throws IOException {
+ bytesInPrev = getInputBytes(fsStats);
+ rawIn.close();
+ bytesInCurr = getInputBytes(fsStats);
+ fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
+ }
+
+ public float getProgress() throws IOException {
+ return rawIn.getProgress();
+ }
+ TaskReporter getTaskReporter() {
+ return reporter;
+ }
+
+ private long getInputBytes(Statistics stats) {
+ return stats == null ? 0 : stats.getBytesRead();
+ }
+ }
+
+ /**
+ * This class skips the records based on the failed ranges from previous
+ * attempts.
+ */
+ class SkippingRecordReader<K, V> extends TrackedRecordReader<K,V> {
+ private SkipRangeIterator skipIt;
+ private SequenceFile.Writer skipWriter;
+ private boolean toWriteSkipRecs;
+ private TaskUmbilicalProtocol umbilical;
+ private Counters.Counter skipRecCounter;
+ private long recIndex = -1;
+
+ SkippingRecordReader(TaskUmbilicalProtocol umbilical,
+ TaskReporter reporter, JobConf job) throws IOException{
+ super(reporter, job);
+ this.umbilical = umbilical;
+ this.skipRecCounter = reporter.getCounter(TaskCounter.MAP_SKIPPED_RECORDS);
+ this.toWriteSkipRecs = toWriteSkipRecs() &&
+ SkipBadRecords.getSkipOutputPath(conf)!=null;
+ skipIt = getSkipRanges().skipRangeIterator();
+ }
+
+ public synchronized boolean next(K key, V value)
+ throws IOException {
+ if(!skipIt.hasNext()) {
+ LOG.warn("Further records got skipped.");
+ return false;
+ }
+ boolean ret = moveToNext(key, value);
+ long nextRecIndex = skipIt.next();
+ long skip = 0;
+ while(recIndex<nextRecIndex && ret) {
+ if(toWriteSkipRecs) {
+ writeSkippedRec(key, value);
+ }
+ ret = moveToNext(key, value);
+ skip++;
+ }
+ //close the skip writer once all the ranges are skipped
+ if(skip>0 && skipIt.skippedAllRanges() && skipWriter!=null) {
+ skipWriter.close();
+ }
+ skipRecCounter.increment(skip);
+ reportNextRecordRange(umbilical, recIndex);
+ if (ret) {
+ incrCounters();
+ }
+ return ret;
+ }
+
+ protected synchronized boolean moveToNext(K key, V value)
+ throws IOException {
+ recIndex++;
+ return super.moveToNext(key, value);
+ }
+
+ @SuppressWarnings("unchecked")
+ private void writeSkippedRec(K key, V value) throws IOException{
+ if(skipWriter==null) {
+ Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
+ Path skipFile = new Path(skipDir, getTaskID().toString());
+ skipWriter =
+ SequenceFile.createWriter(
+ skipFile.getFileSystem(conf), conf, skipFile,
+ (Class<K>) createKey().getClass(),
+ (Class<V>) createValue().getClass(),
+ CompressionType.BLOCK, getTaskReporter());
+ }
+ skipWriter.append(key, value);
+ }
+ }
+
+ @Override
+ public void run(final JobConf job, final TaskUmbilicalProtocol umbilical)
+ throws IOException, ClassNotFoundException, InterruptedException {
+ this.umbilical = umbilical;
+
+ if (isMapTask()) {
+ // If there are no reducers then there won't be any sort. Hence the map
+ // phase will govern the entire attempt's progress.
+ if (conf.getNumReduceTasks() == 0) {
+ mapPhase = getProgress().addPhase("map", 1.0f);
+ } else {
+ // If there are reducers then the entire attempt's progress will be
+ // split between the map phase (67%) and the sort phase (33%).
+ mapPhase = getProgress().addPhase("map", 0.667f);
+ sortPhase = getProgress().addPhase("sort", 0.333f);
+ }
+ }
+ TaskReporter reporter = startReporter(umbilical);
+
+ boolean useNewApi = job.getUseNewMapper();
+ initialize(job, getJobID(), reporter, useNewApi);
+
+ // check if it is a cleanupJobTask
+ if (jobCleanup) {
+ runJobCleanupTask(umbilical, reporter);
+ return;
+ }
+ if (jobSetup) {
+ runJobSetupTask(umbilical, reporter);
+ return;
+ }
+ if (taskCleanup) {
+ runTaskCleanupTask(umbilical, reporter);
+ return;
+ }
+
+ if (useNewApi) {
+ runNewMapper(job, splitMetaInfo, umbilical, reporter);
+ } else {
+ runOldMapper(job, splitMetaInfo, umbilical, reporter);
+ }
+ done(umbilical, reporter);
+ }
+
+ @SuppressWarnings("unchecked")
+ private <T> T getSplitDetails(Path file, long offset)
+ throws IOException {
+ FileSystem fs = file.getFileSystem(conf);
+ FSDataInputStream inFile = fs.open(file);
+ inFile.seek(offset);
+ String className = Text.readString(inFile);
+ Class<T> cls;
+ try {
+ cls = (Class<T>) conf.getClassByName(className);
+ } catch (ClassNotFoundException ce) {
+ IOException wrap = new IOException("Split class " + className +
+ " not found");
+ wrap.initCause(ce);
+ throw wrap;
+ }
+ SerializationFactory factory = new SerializationFactory(conf);
+ Deserializer<T> deserializer =
+ (Deserializer<T>) factory.getDeserializer(cls);
+ deserializer.open(inFile);
+ T split = deserializer.deserialize(null);
+ long pos = inFile.getPos();
+ getCounters().findCounter(
+ TaskCounter.SPLIT_RAW_BYTES).increment(pos - offset);
+ inFile.close();
+ return split;
+ }
+
+ @SuppressWarnings("unchecked")
+ private <INKEY,INVALUE,OUTKEY,OUTVALUE>
+ void runOldMapper(final JobConf job,
+ final TaskSplitIndex splitIndex,
+ final TaskUmbilicalProtocol umbilical,
+ TaskReporter reporter
+ ) throws IOException, InterruptedException,
+ ClassNotFoundException {
+ InputSplit inputSplit = getSplitDetails(new Path(splitIndex.getSplitLocation()),
+ splitIndex.getStartOffset());
+
+ updateJobWithSplit(job, inputSplit);
+ reporter.setInputSplit(inputSplit);
+
+ RecordReader<INKEY,INVALUE> in = isSkipping() ?
+ new SkippingRecordReader<INKEY,INVALUE>(umbilical, reporter, job) :
+ new TrackedRecordReader<INKEY,INVALUE>(reporter, job);
+ job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
+
+ int numReduceTasks = conf.getNumReduceTasks();
+ LOG.info("numReduceTasks: " + numReduceTasks);
+ MapOutputCollector collector = null;
+ if (numReduceTasks > 0) {
+ collector = new MapOutputBuffer(umbilical, job, reporter);
+ } else {
+ collector = new DirectMapOutputCollector(umbilical, job, reporter);
+ }
+ MapRunnable<INKEY,INVALUE,OUTKEY,OUTVALUE> runner =
+ ReflectionUtils.newInstance(job.getMapRunnerClass(), job);
+
+ try {
+ runner.run(in, new OldOutputCollector(collector, conf), reporter);
+ mapPhase.complete();
+ // start the sort phase only if there are reducers
+ if (numReduceTasks > 0) {
+ setPhase(TaskStatus.Phase.SORT);
+ }
+ statusUpdate(umbilical);
+ collector.flush();
+ } finally {
+ //close
+ in.close(); // close input
+ collector.close();
+ }
+ }
+
+ /**
+ * Update the job with details about the file split
+ * @param job the job configuration to update
+ * @param inputSplit the file split
+ */
+ private void updateJobWithSplit(final JobConf job, InputSplit inputSplit) {
+ if (inputSplit instanceof FileSplit) {
+ FileSplit fileSplit = (FileSplit) inputSplit;
+ job.set(JobContext.MAP_INPUT_FILE, fileSplit.getPath().toString());
+ job.setLong(JobContext.MAP_INPUT_START, fileSplit.getStart());
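+      // Despite its name, JobContext.MAP_INPUT_PATH resolves to the key
+      // "mapreduce.map.input.length", so the split length is the intended value.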
+ job.setLong(JobContext.MAP_INPUT_PATH, fileSplit.getLength());
+ }
+ }
+
+ static class NewTrackingRecordReader<K,V>
+ extends org.apache.hadoop.mapreduce.RecordReader<K,V> {
+ private final org.apache.hadoop.mapreduce.RecordReader<K,V> real;
+ private final org.apache.hadoop.mapreduce.Counter inputRecordCounter;
+ private final org.apache.hadoop.mapreduce.Counter fileInputByteCounter;
+ private final TaskReporter reporter;
+ private final Statistics fsStats;
+
+ NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
+ org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
+ TaskReporter reporter,
+ org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
+ throws InterruptedException, IOException {
+ this.reporter = reporter;
+ this.inputRecordCounter = reporter
+ .getCounter(TaskCounter.MAP_INPUT_RECORDS);
+ this.fileInputByteCounter = reporter
+ .getCounter(FileInputFormatCounter.BYTES_READ);
+
+ Statistics matchedStats = null;
+ if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
+ matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
+ .getPath(), taskContext.getConfiguration());
+ }
+ fsStats = matchedStats;
+
+ long bytesInPrev = getInputBytes(fsStats);
+ this.real = inputFormat.createRecordReader(split, taskContext);
+ long bytesInCurr = getInputBytes(fsStats);
+ fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
+ }
+
+ @Override
+ public void close() throws IOException {
+ long bytesInPrev = getInputBytes(fsStats);
+ real.close();
+ long bytesInCurr = getInputBytes(fsStats);
+ fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
+ }
+
+ @Override
+ public K getCurrentKey() throws IOException, InterruptedException {
+ return real.getCurrentKey();
+ }
+
+ @Override
+ public V getCurrentValue() throws IOException, InterruptedException {
+ return real.getCurrentValue();
+ }
+
+ @Override
+ public float getProgress() throws IOException, InterruptedException {
+ return real.getProgress();
+ }
+
+ @Override
+ public void initialize(org.apache.hadoop.mapreduce.InputSplit split,
+ org.apache.hadoop.mapreduce.TaskAttemptContext context
+ ) throws IOException, InterruptedException {
+ long bytesInPrev = getInputBytes(fsStats);
+ real.initialize(split, context);
+ long bytesInCurr = getInputBytes(fsStats);
+ fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
+ }
+
+ @Override
+ public boolean nextKeyValue() throws IOException, InterruptedException {
+ long bytesInPrev = getInputBytes(fsStats);
+ boolean result = real.nextKeyValue();
+ long bytesInCurr = getInputBytes(fsStats);
+ if (result) {
+ inputRecordCounter.increment(1);
+ }
+ fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
+ reporter.setProgress(getProgress());
+ return result;
+ }
+
+ private long getInputBytes(Statistics stats) {
+ return stats == null ? 0 : stats.getBytesRead();
+ }
+ }
+
+  /**
+   * Since the mapred and mapreduce Partitioners don't share a common interface
+   * (mapred.Partitioner extends the deprecated JobConfigurable), the
+   * partitioner lives in Old/NewOutputCollector. Note that, for map-only jobs,
+   * the configured partitioner should not be called: it is common for
+   * partitioners to compute a result mod numReduces, which causes a div0 error
+   * when there are no reduces.
+   */
+ private static class OldOutputCollector<K,V> implements OutputCollector<K,V> {
+ private final Partitioner<K,V> partitioner;
+ private final MapOutputCollector<K,V> collector;
+ private final int numPartitions;
+
+ @SuppressWarnings("unchecked")
+ OldOutputCollector(MapOutputCollector<K,V> collector, JobConf conf) {
+ numPartitions = conf.getNumReduceTasks();
+ if (numPartitions > 1) {
+ partitioner = (Partitioner<K,V>)
+ ReflectionUtils.newInstance(conf.getPartitionerClass(), conf);
+ } else {
+ partitioner = new Partitioner<K,V>() {
+ @Override
+ public void configure(JobConf job) { }
+ @Override
+ public int getPartition(K key, V value, int numPartitions) {
+ return numPartitions - 1;
+ }
+ };
+ }
+ this.collector = collector;
+ }
+
+ @Override
+ public void collect(K key, V value) throws IOException {
+ try {
+ collector.collect(key, value,
+ partitioner.getPartition(key, value, numPartitions));
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ throw new IOException("interrupt exception", ie);
+ }
+ }
+ }
+
+ private class NewDirectOutputCollector<K,V>
+ extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
+ private final org.apache.hadoop.mapreduce.RecordWriter out;
+
+ private final TaskReporter reporter;
+
+ private final Counters.Counter mapOutputRecordCounter;
+ private final Counters.Counter fileOutputByteCounter;
+ private final Statistics fsStats;
+
+ @SuppressWarnings("unchecked")
+ NewDirectOutputCollector(MRJobConfig jobContext,
+ JobConf job, TaskUmbilicalProtocol umbilical, TaskReporter reporter)
+ throws IOException, ClassNotFoundException, InterruptedException {
+ this.reporter = reporter;
+ mapOutputRecordCounter = reporter
+ .getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
+ fileOutputByteCounter = reporter
+ .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
+
+ Statistics matchedStats = null;
+ if (outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
+ matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
+ .getOutputPath(taskContext), taskContext.getConfiguration());
+ }
+ fsStats = matchedStats;
+
+ long bytesOutPrev = getOutputBytes(fsStats);
+ out = outputFormat.getRecordWriter(taskContext);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void write(K key, V value)
+ throws IOException, InterruptedException {
+ reporter.progress();
+ long bytesOutPrev = getOutputBytes(fsStats);
+ out.write(key, value);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ mapOutputRecordCounter.increment(1);
+ }
+
+ @Override
+ public void close(TaskAttemptContext context)
+ throws IOException,InterruptedException {
+ reporter.progress();
+ if (out != null) {
+ long bytesOutPrev = getOutputBytes(fsStats);
+ out.close(context);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ }
+ }
+
+ private long getOutputBytes(Statistics stats) {
+ return stats == null ? 0 : stats.getBytesWritten();
+ }
+ }
+
+ private class NewOutputCollector<K,V>
+ extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
+ private final MapOutputCollector<K,V> collector;
+ private final org.apache.hadoop.mapreduce.Partitioner<K,V> partitioner;
+ private final int partitions;
+
+ @SuppressWarnings("unchecked")
+ NewOutputCollector(org.apache.hadoop.mapreduce.JobContext jobContext,
+ JobConf job,
+ TaskUmbilicalProtocol umbilical,
+ TaskReporter reporter
+ ) throws IOException, ClassNotFoundException {
+ collector = new MapOutputBuffer<K,V>(umbilical, job, reporter);
+ partitions = jobContext.getNumReduceTasks();
+ if (partitions > 1) {
+ partitioner = (org.apache.hadoop.mapreduce.Partitioner<K,V>)
+ ReflectionUtils.newInstance(jobContext.getPartitionerClass(), job);
+ } else {
+ partitioner = new org.apache.hadoop.mapreduce.Partitioner<K,V>() {
+ @Override
+ public int getPartition(K key, V value, int numPartitions) {
+ return partitions - 1;
+ }
+ };
+ }
+ }
+
+ @Override
+ public void write(K key, V value) throws IOException, InterruptedException {
+ collector.collect(key, value,
+ partitioner.getPartition(key, value, partitions));
+ }
+
+ @Override
+ public void close(TaskAttemptContext context
+ ) throws IOException,InterruptedException {
+ try {
+ collector.flush();
+ } catch (ClassNotFoundException cnf) {
+        throw new IOException("can't find class", cnf);
+ }
+ collector.close();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private <INKEY,INVALUE,OUTKEY,OUTVALUE>
+ void runNewMapper(final JobConf job,
+ final TaskSplitIndex splitIndex,
+ final TaskUmbilicalProtocol umbilical,
+ TaskReporter reporter
+ ) throws IOException, ClassNotFoundException,
+ InterruptedException {
+ // make a task context so we can get the classes
+ org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
+ new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job,
+ getTaskID(),
+ reporter);
+ // make a mapper
+ org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE> mapper =
+ (org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>)
+ ReflectionUtils.newInstance(taskContext.getMapperClass(), job);
+ // make the input format
+ org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE> inputFormat =
+ (org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE>)
+ ReflectionUtils.newInstance(taskContext.getInputFormatClass(), job);
+ // rebuild the input split
+    org.apache.hadoop.mapreduce.InputSplit split =
+        getSplitDetails(new Path(splitIndex.getSplitLocation()),
+            splitIndex.getStartOffset());
+
+ org.apache.hadoop.mapreduce.RecordReader<INKEY,INVALUE> input =
+ new NewTrackingRecordReader<INKEY,INVALUE>
+ (split, inputFormat, reporter, taskContext);
+
+ job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
+ org.apache.hadoop.mapreduce.RecordWriter output = null;
+
+ // get an output object
+ if (job.getNumReduceTasks() == 0) {
+ output =
+ new NewDirectOutputCollector(taskContext, job, umbilical, reporter);
+ } else {
+ output = new NewOutputCollector(taskContext, job, umbilical, reporter);
+ }
+
+ org.apache.hadoop.mapreduce.MapContext<INKEY, INVALUE, OUTKEY, OUTVALUE>
+ mapContext =
+ new MapContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(job, getTaskID(),
+ input, output,
+ committer,
+ reporter, split);
+
+ org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
+ mapperContext =
+ new WrappedMapper<INKEY, INVALUE, OUTKEY, OUTVALUE>().getMapContext(
+ mapContext);
+
+ input.initialize(split, mapperContext);
+ mapper.run(mapperContext);
+ mapPhase.complete();
+ setPhase(TaskStatus.Phase.SORT);
+ statusUpdate(umbilical);
+ input.close();
+ output.close(mapperContext);
+ }
+
+ interface MapOutputCollector<K, V> {
+
+ public void collect(K key, V value, int partition
+ ) throws IOException, InterruptedException;
+ public void close() throws IOException, InterruptedException;
+
+ public void flush() throws IOException, InterruptedException,
+ ClassNotFoundException;
+
+ }
+
+ class DirectMapOutputCollector<K, V>
+ implements MapOutputCollector<K, V> {
+
+ private RecordWriter<K, V> out = null;
+
+ private TaskReporter reporter = null;
+
+ private final Counters.Counter mapOutputRecordCounter;
+ private final Counters.Counter fileOutputByteCounter;
+ private final Statistics fsStats;
+
+ @SuppressWarnings("unchecked")
+ public DirectMapOutputCollector(TaskUmbilicalProtocol umbilical,
+ JobConf job, TaskReporter reporter) throws IOException {
+ this.reporter = reporter;
+ String finalName = getOutputName(getPartition());
+ FileSystem fs = FileSystem.get(job);
+
+ OutputFormat<K, V> outputFormat = job.getOutputFormat();
+ mapOutputRecordCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
+
+ fileOutputByteCounter = reporter
+ .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
+
+ Statistics matchedStats = null;
+ if (outputFormat instanceof FileOutputFormat) {
+ matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
+ }
+ fsStats = matchedStats;
+
+ long bytesOutPrev = getOutputBytes(fsStats);
+ out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ }
+
+ public void close() throws IOException {
+ if (this.out != null) {
+ long bytesOutPrev = getOutputBytes(fsStats);
+ out.close(this.reporter);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ }
+
+ }
+
+ public void flush() throws IOException, InterruptedException,
+ ClassNotFoundException {
+ }
+
+ public void collect(K key, V value, int partition) throws IOException {
+ reporter.progress();
+ long bytesOutPrev = getOutputBytes(fsStats);
+ out.write(key, value);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ mapOutputRecordCounter.increment(1);
+ }
+
+ private long getOutputBytes(Statistics stats) {
+ return stats == null ? 0 : stats.getBytesWritten();
+ }
+ }
+
+ private class MapOutputBuffer<K extends Object, V extends Object>
+ implements MapOutputCollector<K, V>, IndexedSortable {
+ final int partitions;
+ final JobConf job;
+ final TaskReporter reporter;
+ final Class<K> keyClass;
+ final Class<V> valClass;
+ final RawComparator<K> comparator;
+ final SerializationFactory serializationFactory;
+ final Serializer<K> keySerializer;
+ final Serializer<V> valSerializer;
+ final CombinerRunner<K,V> combinerRunner;
+ final CombineOutputCollector<K, V> combineCollector;
+
+ // Compression for map-outputs
+ final CompressionCodec codec;
+
+ // k/v accounting
+ final IntBuffer kvmeta; // metadata overlay on backing store
+ int kvstart; // marks origin of spill metadata
+ int kvend; // marks end of spill metadata
+ int kvindex; // marks end of fully serialized records
+
+ int equator; // marks origin of meta/serialization
+ int bufstart; // marks beginning of spill
+ int bufend; // marks beginning of collectable
+ int bufmark; // marks end of record
+ int bufindex; // marks end of collected
+ int bufvoid; // marks the point where we should stop
+ // reading at the end of the buffer
+
+ byte[] kvbuffer; // main output buffer
+ private final byte[] b0 = new byte[0];
+
+ private static final int INDEX = 0; // index offset in acct
+ private static final int VALSTART = 1; // val offset in acct
+ private static final int KEYSTART = 2; // key offset in acct
+ private static final int PARTITION = 3; // partition offset in acct
+ private static final int NMETA = 4; // num meta ints
+ private static final int METASIZE = NMETA * 4; // size in bytes
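+ // Each record's accounting entry is NMETA contiguous ints in kvmeta,
+ // laid out as [INDEX | VALSTART | KEYSTART | PARTITION]. sort() permutes
+ // only the INDEX ints (see swap()), so the serialized key/value bytes in
+ // kvbuffer never move while sorting.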
+
+ // spill accounting
+ final int maxRec;
+ final int softLimit;
+ boolean spillInProgress;
+ int bufferRemaining;
+ volatile Throwable sortSpillException = null;
+
+ int numSpills = 0;
+ final int minSpillsForCombine;
+ final IndexedSorter sorter;
+ final ReentrantLock spillLock = new ReentrantLock();
+ final Condition spillDone = spillLock.newCondition();
+ final Condition spillReady = spillLock.newCondition();
+ final BlockingBuffer bb = new BlockingBuffer();
+ volatile boolean spillThreadRunning = false;
+ final SpillThread spillThread = new SpillThread();
+
+ final FileSystem rfs;
+
+ // Counters
+ final Counters.Counter mapOutputByteCounter;
+ final Counters.Counter mapOutputRecordCounter;
+ final Counters.Counter fileOutputByteCounter;
+
+ final ArrayList<SpillRecord> indexCacheList =
+ new ArrayList<SpillRecord>();
+ private int totalIndexCacheMemory;
+ private int indexCacheMemoryLimit;
+ private static final int INDEX_CACHE_MEMORY_LIMIT_DEFAULT = 1024 * 1024;
+
+ @SuppressWarnings("unchecked")
+ public MapOutputBuffer(TaskUmbilicalProtocol umbilical, JobConf job,
+ TaskReporter reporter
+ ) throws IOException, ClassNotFoundException {
+ this.job = job;
+ this.reporter = reporter;
+ partitions = job.getNumReduceTasks();
+ rfs = ((LocalFileSystem)FileSystem.getLocal(job)).getRaw();
+
+ //sanity checks
+ final float spillper =
+ job.getFloat(JobContext.MAP_SORT_SPILL_PERCENT, (float)0.8);
+ final int sortmb = job.getInt(JobContext.IO_SORT_MB, 100);
+ indexCacheMemoryLimit = job.getInt(JobContext.INDEX_CACHE_MEMORY_LIMIT,
+ INDEX_CACHE_MEMORY_LIMIT_DEFAULT);
+ if (spillper > (float)1.0 || spillper <= (float)0.0) {
+ throw new IOException("Invalid \"" + JobContext.MAP_SORT_SPILL_PERCENT +
+ "\": " + spillper);
+ }
+ if ((sortmb & 0x7FF) != sortmb) {
+ throw new IOException(
+ "Invalid \"" + JobContext.IO_SORT_MB + "\": " + sortmb);
+ }
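+ // (sortmb must fit in 11 bits, i.e. be at most 2047, so that
+ // sortmb << 20 cannot overflow a signed int)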
+ sorter = ReflectionUtils.newInstance(job.getClass("map.sort.class",
+ QuickSort.class, IndexedSorter.class), job);
+ // buffers and accounting
+ int maxMemUsage = sortmb << 20;
+ maxMemUsage -= maxMemUsage % METASIZE;
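+ // maxMemUsage is rounded down to a METASIZE multiple so the kvmeta
+ // IntBuffer overlay always covers whole metadata entries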
+ kvbuffer = new byte[maxMemUsage];
+ bufvoid = kvbuffer.length;
+ kvmeta = ByteBuffer.wrap(kvbuffer).asIntBuffer();
+ setEquator(0);
+ bufstart = bufend = bufindex = equator;
+ kvstart = kvend = kvindex;
+
+ maxRec = kvmeta.capacity() / NMETA;
+ softLimit = (int)(kvbuffer.length * spillper);
+ bufferRemaining = softLimit;
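+ // e.g. with io.sort.mb = 100 and the default 0.8 spill percent, the
+ // buffer holds 100 << 20 = 104857600 bytes and softLimit = 83886080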
+ if (LOG.isInfoEnabled()) {
+ LOG.info(JobContext.IO_SORT_MB + ": " + sortmb);
+ LOG.info("soft limit at " + softLimit);
+ LOG.info("bufstart = " + bufstart + "; bufvoid = " + bufvoid);
+ LOG.info("kvstart = " + kvstart + "; length = " + maxRec);
+ }
+
+ // k/v serialization
+ comparator = job.getOutputKeyComparator();
+ keyClass = (Class<K>)job.getMapOutputKeyClass();
+ valClass = (Class<V>)job.getMapOutputValueClass();
+ serializationFactory = new SerializationFactory(job);
+ keySerializer = serializationFactory.getSerializer(keyClass);
+ keySerializer.open(bb);
+ valSerializer = serializationFactory.getSerializer(valClass);
+ valSerializer.open(bb);
+
+ // output counters
+ mapOutputByteCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_BYTES);
+ mapOutputRecordCounter =
+ reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
+ fileOutputByteCounter = reporter
+ .getCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES);
+
+ // compression
+ if (job.getCompressMapOutput()) {
+ Class<? extends CompressionCodec> codecClass =
+ job.getMapOutputCompressorClass(DefaultCodec.class);
+ codec = ReflectionUtils.newInstance(codecClass, job);
+ } else {
+ codec = null;
+ }
+
+ // combiner
+ final Counters.Counter combineInputCounter =
+ reporter.getCounter(TaskCounter.COMBINE_INPUT_RECORDS);
+ combinerRunner = CombinerRunner.create(job, getTaskID(),
+ combineInputCounter,
+ reporter, null);
+ if (combinerRunner != null) {
+ final Counters.Counter combineOutputCounter =
+ reporter.getCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
+ combineCollector =
+ new CombineOutputCollector<K, V>(combineOutputCounter, reporter, conf);
+ } else {
+ combineCollector = null;
+ }
+ spillInProgress = false;
+ minSpillsForCombine = job.getInt(JobContext.MAP_COMBINE_MIN_SPILLS, 3);
+ spillThread.setDaemon(true);
+ spillThread.setName("SpillThread");
+ spillLock.lock();
+ try {
+ spillThread.start();
+ while (!spillThreadRunning) {
+ spillDone.await();
+ }
+ } catch (InterruptedException e) {
+ throw new IOException("Spill thread failed to initialize", e);
+ } finally {
+ spillLock.unlock();
+ }
+ if (sortSpillException != null) {
+ throw new IOException("Spill thread failed to initialize",
+ sortSpillException);
+ }
+ }
+
+ /**
+ * Serialize the key, value to intermediate storage.
+ * When this method returns, kvindex must refer to sufficient unused
+ * storage to store one metadata entry.
+ */
+ public synchronized void collect(K key, V value, final int partition
+ ) throws IOException {
+ reporter.progress();
+ if (key.getClass() != keyClass) {
+ throw new IOException("Type mismatch in key from map: expected "
+ + keyClass.getName() + ", received "
+ + key.getClass().getName());
+ }
+ if (value.getClass() != valClass) {
+ throw new IOException("Type mismatch in value from map: expected "
+ + valClass.getName() + ", received "
+ + value.getClass().getName());
+ }
+ if (partition < 0 || partition >= partitions) {
+ throw new IOException("Illegal partition for " + key + " (" +
+ partition + ")");
+ }
+ checkSpillException();
+ bufferRemaining -= METASIZE;
+ if (bufferRemaining <= 0) {
+ // start spill if the thread is not running and the soft limit has been
+ // reached
+ spillLock.lock();
+ try {
+ do {
+ if (!spillInProgress) {
+ final int kvbidx = 4 * kvindex;
+ final int kvbend = 4 * kvend;
+ // serialized, unspilled bytes always lie between kvindex and
+ // bufindex, crossing the equator. Note that any void space
+ // created by a reset must be included in "used" bytes
+ final int bUsed = distanceTo(kvbidx, bufindex);
+ final boolean bufsoftlimit = bUsed >= softLimit;
+ if ((kvbend + METASIZE) % kvbuffer.length !=
+ equator - (equator % METASIZE)) {
+ // spill finished, reclaim space
+ resetSpill();
+ bufferRemaining = Math.min(
+ distanceTo(bufindex, kvbidx) - 2 * METASIZE,
+ softLimit - bUsed) - METASIZE;
+ continue;
+ } else if (bufsoftlimit && kvindex != kvend) {
+ // spill records, if any collected; check latter, as it may
+ // be possible for metadata alignment to hit spill pcnt
+ startSpill();
+ final int avgRec = (int)
+ (mapOutputByteCounter.getCounter() /
+ mapOutputRecordCounter.getCounter());
+ // leave at least half the split buffer for serialization data
+ // ensure that kvindex >= bufindex
+ final int distkvi = distanceTo(bufindex, kvbidx);
+ final int newPos = (bufindex +
+ Math.max(2 * METASIZE - 1,
+ Math.min(distkvi / 2,
+ distkvi / (METASIZE + avgRec) * METASIZE)))
+ % kvbuffer.length;
+ setEquator(newPos);
+ bufmark = bufindex = newPos;
+ final int serBound = 4 * kvend;
+ // bytes remaining before the lock must be held and limits
+ // checked is the minimum of three arcs: the metadata space, the
+ // serialization space, and the soft limit
+ bufferRemaining = Math.min(
+ // metadata max
+ distanceTo(bufend, newPos),
+ Math.min(
+ // serialization max
+ distanceTo(newPos, serBound),
+ // soft limit
+ softLimit)) - 2 * METASIZE;
+ }
+ }
+ } while (false);
+ } finally {
+ spillLock.unlock();
+ }
+ }
+
+ try {
+ // serialize key bytes into buffer
+ int keystart = bufindex;
+ keySerializer.serialize(key);
+ if (bufindex < keystart) {
+ // wrapped the key; must make contiguous
+ bb.shiftBufferedKey();
+ keystart = 0;
+ }
+ // serialize value bytes into buffer
+ final int valstart = bufindex;
+ valSerializer.serialize(value);
+ // It's possible for records to have zero length, i.e. the serializer
+ // will perform no writes. To ensure that the boundary conditions are
+ // checked and that the kvindex invariant is maintained, perform a
+ // zero-length write into the buffer. The logic monitoring this could be
+ // moved into collect, but this is cleaner and inexpensive. For now, it
+ // is acceptable.
+ bb.write(b0, 0, 0);
+
+ // the record must be marked after the preceding write, as the metadata
+ // for this record are not yet written
+ int valend = bb.markRecord();
+
+ mapOutputRecordCounter.increment(1);
+ mapOutputByteCounter.increment(
+ distanceTo(keystart, valend, bufvoid));
+
+ // write accounting info
+ kvmeta.put(kvindex + INDEX, kvindex);
+ kvmeta.put(kvindex + PARTITION, partition);
+ kvmeta.put(kvindex + KEYSTART, keystart);
+ kvmeta.put(kvindex + VALSTART, valstart);
+ // advance kvindex
+ kvindex = (kvindex - NMETA + kvmeta.capacity()) % kvmeta.capacity();
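+ // kvindex retreats one NMETA-sized entry per record: metadata grows
+ // "downward" from the equator while serialized bytes grow "upward"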
+ } catch (MapBufferTooSmallException e) {
+ LOG.info("Record too large for in-memory buffer: " + e.getMessage());
+ spillSingleRecord(key, value, partition);
+ mapOutputRecordCounter.increment(1);
+ return;
+ }
+ }
+
+ /**
+ * Set the point from which meta and serialization data expand. The meta
+ * indices are aligned with the buffer, so metadata never spans the ends of
+ * the circular buffer.
+ */
+ private void setEquator(int pos) {
+ equator = pos;
+ // set index prior to first entry, aligned at meta boundary
+ final int aligned = pos - (pos % METASIZE);
+ kvindex =
+ ((aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
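+ // (aligned - METASIZE) backs up one whole metadata entry in bytes;
+ // dividing by 4 converts that byte offset into an index into kvmeta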
+ if (LOG.isInfoEnabled()) {
+ LOG.info("(EQUATOR) " + pos + " kvi " + kvindex +
+ "(" + (kvindex * 4) + ")");
+ }
+ }
+
+ /**
+ * The spill is complete, so set the buffer and meta indices to be equal to
+ * the new equator to free space for continuing collection. Note that when
+ * kvindex == kvend == kvstart, the buffer is empty.
+ */
+ private void resetSpill() {
+ final int e = equator;
+ bufstart = bufend = e;
+ final int aligned = e - (e % METASIZE);
+ // set start/end to point to first meta record
+ kvstart = kvend =
+ ((aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
+ if (LOG.isInfoEnabled()) {
+ LOG.info("(RESET) equator " + e + " kv " + kvstart + "(" +
+ (kvstart * 4) + ")" + " kvi " + kvindex + "(" + (kvindex * 4) + ")");
+ }
+ }
+
+ /**
+ * Compute the distance in bytes between two indices in the serialization
+ * buffer.
+ * @see #distanceTo(int,int,int)
+ */
+ final int distanceTo(final int i, final int j) {
+ return distanceTo(i, j, kvbuffer.length);
+ }
+
+ /**
+ * Compute the distance between two indices in the circular buffer given the
+ * max distance.
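+ * For example, with mod = 100: distanceTo(10, 30, 100) = 20, while
+ * distanceTo(90, 10, 100) = 100 - 90 + 10 = 20 (wrapping past the end).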
+ */
+ int distanceTo(final int i, final int j, final int mod) {
+ return i <= j
+ ? j - i
+ : mod - i + j;
+ }
+
+ /**
+ * For the given meta position, return the dereferenced position in the
+ * integer array. Each meta block contains several integers describing a
+ * record in its serialized form, but because sorting permutes only the
+ * INDEX entries, the INDEX stored at metapos is not necessarily related
+ * to the adjacent metadata. The value at the referenced int position is
+ * the start offset of the metadata block that actually describes the
+ * associated serialized record, which may be the block at some other
+ * position metapos + k.
+ */
+ int offsetFor(int metapos) {
+ return kvmeta.get(metapos * NMETA + INDEX);
+ }
+
+ /**
+ * Compare the records at logical positions i and j (taken modulo the
+ * offset capacity): first by partition, then by key.
+ * @see IndexedSortable#compare
+ */
+ public int compare(final int mi, final int mj) {
+ final int kvi = offsetFor(mi % maxRec);
+ final int kvj = offsetFor(mj % maxRec);
+ final int kvip = kvmeta.get(kvi + PARTITION);
+ final int kvjp = kvmeta.get(kvj + PARTITION);
+ // sort by partition
+ if (kvip != kvjp) {
+ return kvip - kvjp;
+ }
+ // sort by key
+ return comparator.compare(kvbuffer,
+ kvmeta.get(kvi + KEYSTART),
+ kvmeta.get(kvi + VALSTART) - kvmeta.get(kvi + KEYSTART),
+ kvbuffer,
+ kvmeta.get(kvj + KEYSTART),
+ kvmeta.get(kvj + VALSTART) - kvmeta.get(kvj + KEYSTART));
+ }
+
+ /**
+ * Swap the entries at logical indices i and j (modulo the offset capacity).
+ * @see IndexedSortable#swap
+ */
+ public void swap(final int mi, final int mj) {
+ final int kvi = (mi % maxRec) * NMETA + INDEX;
+ final int kvj = (mj % maxRec) * NMETA + INDEX;
+ int tmp = kvmeta.get(kvi);
+ kvmeta.put(kvi, kvmeta.get(kvj));
+ kvmeta.put(kvj, tmp);
+ }
+
+ /**
+ * Inner stream class that blocks writes into the collection buffer
+ * while a spill of serialized records to disk is in progress.
+ */
+ protected class BlockingBuffer extends DataOutputStream {
+
+ public BlockingBuffer() {
+ super(new Buffer());
+ }
+
+ /**
+ * Mark the end of a record. This is required so that the buffer can
+ * cut the spill at a record boundary.
+ */
+ public int markRecord() {
+ bufmark = bufindex;
+ return bufindex;
+ }
+
+ /**
+ * Truncate the writable buffer at the last mark, then rewrite the
+ * bytes between the last mark and kvindex so the key is contiguous.
+ * This handles the special case where a serialized key wraps around
+ * the end of the buffer. If the key is to be passed to a RawComparator,
+ * it must be contiguous in the buffer, so the wrapped data is recopied
+ * back into the buffer starting at the beginning. Note that this
+ * method should <b>only</b> be called immediately after detecting
+ * this condition; calling it at any other time is undefined and would
+ * likely result in data loss or corruption.
+ * @see #markRecord()
+ */
+ protected void shiftBufferedKey() throws IOException {
+ // spillLock unnecessary; both kvend and kvindex are current
+ int headbytelen = bufvoid - bufmark;
+ bufvoid = bufmark;
+ final int kvbidx = 4 * kvindex;
+ final int kvbend = 4 * kvend;
+ final int avail =
+ Math.min(distanceTo(0, kvbidx), distanceTo(0, kvbend));
+ if (bufindex + headbytelen < avail) {
+ System.arraycopy(kvbuffer, 0, kvbuffer, headbytelen, bufindex);
+ System.arraycopy(kvbuffer, bufvoid, kvbuffer, 0, headbytelen);
+ bufindex += headbytelen;
+ bufferRemaining -= kvbuffer.length - bufvoid;
+ } else {
+ byte[] keytmp = new byte[bufindex];
+ System.arraycopy(kvbuffer, 0, keytmp, 0, bufindex);
+ bufindex = 0;
+ out.write(kvbuffer, bufmark, headbytelen);
+ out.write(keytmp);
+ }
+ }
+ }
+
+ public class Buffer extends OutputStream {
+ private final byte[] scratch = new byte[1];
+
+ @Override
+ public void write(int v)
+ throws IOException {
+ scratch[0] = (byte)v;
+ write(scratch, 0, 1);
+ }
+
+ /**
+ * Attempt to write a sequence of bytes to the collection buffer.
+ * This method will block if the spill thread is running and it
+ * cannot write.
+ * @throws MapBufferTooSmallException if record is too large to
+ * serialize into the collection buffer.
+ */
+ @Override
+ public void write(byte b[], int off, int len)
+ throws IOException {
+ // must always verify the invariant that at least METASIZE bytes are
+ // available beyond kvindex, even when len == 0
+ bufferRemaining -= len;
+ if (bufferRemaining <= 0) {
+ // writing these bytes could exhaust available buffer space or fill
+ // the buffer to soft limit. check if spill or blocking are necessary
+ boolean blockwrite = false;
+ spillLock.lock();
+ try {
+ do {
+ checkSpillException();
+
+ final int kvbidx = 4 * kvindex;
+ final int kvbend = 4 * kvend;
+ // ser distance to key index
+ final int distkvi = distanceTo(bufindex, kvbidx);
+ // ser distance to spill end index
+ final int distkve = distanceTo(bufindex, kvbend);
+
+ // if kvindex is closer than kvend, then a spill is neither in
+ // progress nor complete and reset since the lock was held. The
+ // write should block only if there is insufficient space to
+ // complete the current write, write the metadata for this record,
+ // and write the metadata for the next record. If kvend is closer,
+ // then the write should block if there is too little space for
+ // either the metadata or the current write. Note that collect
+ // ensures its metadata requirement with a zero-length write
+ blockwrite = distkvi <= distkve
+ ? distkvi <= len + 2 * METASIZE
+ : distkve <= len || distanceTo(bufend, kvbidx) < 2 * METASIZE;
+
+ if (!spillInProgress) {
+ if (blockwrite) {
+ if ((kvbend + METASIZE) % kvbuffer.length !=
+ equator - (equator % METASIZE)) {
+ // spill finished, reclaim space
+ // need to use meta exclusively; zero-len rec & 100% spill
+ // pcnt would fail
+ resetSpill(); // resetSpill doesn't move bufindex, kvindex
+ bufferRemaining = Math.min(
+ distkvi - 2 * METASIZE,
+ softLimit - distanceTo(kvbidx, bufindex)) - len;
+ continue;
+ }
+ // we have records we can spill; only spill if blocked
+ if (kvindex != kvend) {
+ startSpill();
+ // Blocked on this write, waiting for the spill just
+ // initiated to finish. Instead of repositioning the marker
+ // and copying the partial record, we set the record start
+ // to be the new equator
+ setEquator(bufmark);
+ } else {
+ // We have no buffered records, and this record is too large
+ // to write into kvbuffer. We must spill it directly from
+ // collect
+ final int size = distanceTo(bufstart, bufindex) + len;
+ setEquator(0);
+ bufstart = bufend = bufindex = equator;
+ kvstart = kvend = kvindex;
+ bufvoid = kvbuffer.length;
+ throw new MapBufferTooSmallException(size + " bytes");
+ }
+ }
+ }
+
+ if (blockwrite) {
+ // wait for spill
+ try {
+ while (spillInProgress) {
+ reporter.progress();
+ spillDone.await();
+ }
+ } catch (InterruptedException e) {
+ throw new IOException(
+ "Buffer interrupted while waiting for the writer", e);
+ }
+ }
+ } while (blockwrite);
+ } finally {
+ spillLock.unlock();
+ }
+ }
+ // here, we know that we have sufficient space to write
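+ // a write past bufvoid is split: the first gaplen bytes fill the
+ // tail of the buffer, then the copy wraps around to position 0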
+ if (bufindex + len > bufvoid) {
+ final int gaplen = bufvoid - bufindex;
+ System.arraycopy(b, off, kvbuffer, bufindex, gaplen);
+ len -= gaplen;
+ off += gaplen;
+ bufindex = 0;
+ }
+ System.arraycopy(b, off, kvbuffer, bufindex, len);
+ bufindex += len;
+ }
+ }
+
+ public void flush() throws IOException, ClassNotFoundException,
+ InterruptedException {
+ LOG.info("Starting flush of map output");
+ spillLock.lock();
+ try {
+ while (spillInProgress) {
+ reporter.progress();
+ spillDone.await();
+ }
+ checkSpillException();
+
+ final int kvbend = 4 * kvend;
+ if ((kvbend + METASIZE) % kvbuffer.length !=
+ equator - (equator % METASIZE)) {
+ // spill finished
+ resetSpill();
+ }
+ if (kvindex != kvend) {
+ kvend = (kvindex + NMETA) % kvmeta.capacity();
+ bufend = bufmark;
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Spilling map output");
+ LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
+ "; bufvoid = " + bufvoid);
+ LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
+ "); kvend = " + kvend + "(" + (kvend * 4) +
+ "); length = " + (distanceTo(kvend, kvstart,
+ kvmeta.capacity()) + 1) + "/" + maxRec);
+ }
+ sortAndSpill();
+ }
+ } catch (InterruptedException e) {
+ throw new IOException("Interrupted while waiting for the writer", e);
+ } finally {
+ spillLock.unlock();
+ }
+ assert !spillLock.isHeldByCurrentThread();
+ // shut down spill thread and wait for it to exit. Since the preceding
+ // ensures that it is finished with its work (and sortAndSpill did not
+ // throw), we elect to use an interrupt instead of setting a flag.
+ // Spilling simultaneously from this thread while the spill thread
+ // finishes its work might be both a useful way to extend this and also
+ // sufficient motivation for the latter approach.
+ try {
+ spillThread.interrupt();
+ spillThread.join();
+ } catch (InterruptedException e) {
+ throw new IOException("Spill failed", e);
+ }
+ // release sort buffer before the merge
+ kvbuffer = null;
+ mergeParts();
+ Path outputPath = mapOutputFile.getOutputFile();
+ fileOutputByteCounter.increment(rfs.getFileStatus(outputPath).getLen());
+ }
+
+ public void close() { }
+
+ protected class SpillThread extends Thread {
+
+ @Override
+ public void run() {
+ spillLock.lock();
+ spillThreadRunning = true;
+ try {
+ while (true) {
+ spillDone.signal();
+ while (!spillInProgress) {
+ spillReady.await();
+ }
+ try {
+ spillLock.unlock();
+ sortAndSpill();
+ } catch (Throwable t) {
+ sortSpillException = t;
+ } finally {
+ spillLock.lock();
+ if (bufend < bufstart) {
+ bufvoid = kvbuffer.length;
+ }
+ kvstart = kvend;
+ bufstart = bufend;
+ spillInProgress = false;
+ }
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ } finally {
+ spillLock.unlock();
+ spillThreadRunning = false;
+ }
+ }
+ }
+
+ private void checkSpillException() throws IOException {
+ final Throwable lspillException = sortSpillException;
+ if (lspillException != null) {
+ if (lspillException instanceof Error) {
+ final String logMsg = "Task " + getTaskID() + " failed : " +
+ StringUtils.stringifyException(lspillException);
+ reportFatalError(getTaskID(), lspillException, logMsg);
+ }
+ throw new IOException("Spill failed", lspillException);
+ }
+ }
+
+ private void startSpill() {
+ assert !spillInProgress;
+ kvend = (kvindex + NMETA) % kvmeta.capacity();
+ bufend = bufmark;
+ spillInProgress = true;
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Spilling map output");
+ LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
+ "; bufvoid = " + bufvoid);
+ LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
+ "); kvend = " + kvend + "(" + (kvend * 4) +
+ "); length = " + (distanceTo(kvend, kvstart,
+ kvmeta.capacity()) + 1) + "/" + maxRec);
+ }
+ spillReady.signal();
+ }
+
+ private void sortAndSpill() throws IOException, ClassNotFoundException,
+ InterruptedException {
+ //approximate the length of the output file to be the length of the
+ //buffer + header lengths for the partitions
+ final long size = (bufend >= bufstart
+ ? bufend - bufstart
+ : (bufvoid - bufend) + bufstart) +
+ partitions * APPROX_HEADER_LENGTH;
+ FSDataOutputStream out = null;
+ try {
+ // create spill file
+ final SpillRecord spillRec = new SpillRecord(partitions);
+ final Path filename =
+ mapOutputFile.getSpillFileForWrite(numSpills, size);
+ out = rfs.create(filename);
+
+ final int mstart = kvend / NMETA;
+ final int mend = 1 + // kvend is a valid record
+ (kvstart >= kvend
+ ? kvstart
+ : kvmeta.capacity() + kvstart) / NMETA;
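+ // collection fills metadata entries downward from the equator, so the
+ // committed records occupy logical positions [mstart, mend) for the sort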
+ sorter.sort(MapOutputBuffer.this, mstart, mend, reporter);
+ int spindex = mstart;
+ final IndexRecord rec = new IndexRecord();
+ final InMemValBytes value = new InMemValBytes();
+ for (int i = 0; i < partitions; ++i) {
+ IFile.Writer<K, V> writer = null;
+ try {
+ long segmentStart = out.getPos();
+ writer = new Writer<K, V>(job, out, keyClass, valClass, codec,
+ spilledRecordsCounter);
+ if (combinerRunner == null) {
+ // spill directly
+ DataInputBuffer key = new DataInputBuffer();
+ while (spindex < mend &&
+ kvmeta.get(offsetFor(spindex % maxRec) + PARTITION) == i) {
+ final int kvoff = offsetFor(spindex % maxRec);
+ key.reset(kvbuffer, kvmeta.get(kvoff + KEYSTART),
+ (kvmeta.get(kvoff + VALSTART) -
+ kvmeta.get(kvoff + KEYSTART)));
+ getVBytesForOffset(kvoff, value);
+ writer.append(key, value);
+ ++spindex;
+ }
+ } else {
+ int spstart = spindex;
+ while (spindex < mend &&
+ kvmeta.get(offsetFor(spindex % maxRec)
+ + PARTITION) == i) {
+ ++spindex;
+ }
+ // Note: we would like to avoid the combiner if we have fewer
+ // than some threshold of records for a partition
+ if (spstart != spindex) {
+ combineCollector.setWriter(writer);
+ RawKeyValueIterator kvIter =
+ new MRResultIterator(spstart, spindex);
+ combinerRunner.combine(kvIter, combineCollector);
+ }
+ }
+
+ // close the writer
+ writer.close();
+
+ // record offsets
+ rec.startOffset = segmentStart;
+ rec.rawLength = writer.getRawLength();
+ rec.partLength = writer.getCompressedLength();
+ spillRec.putIndex(rec, i);
+
+ writer = null;
+ } finally {
+ if (null != writer) writer.close();
+ }
+ }
+
+ if (totalIndexCacheMemory >= indexCacheMemoryLimit) {
+ // create spill index file
+ Path indexFilename =
+ mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions
+ * MAP_OUTPUT_INDEX_RECORD_LENGTH);
+ spillRec.writeToFile(indexFilename, job);
+ } else {
+ indexCacheList.add(spillRec);
+ totalIndexCacheMemory +=
+ spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
+ }
+ LOG.info("Finished spill " + numSpills);
+ ++numSpills;
+ } finally {
+ if (out != null) out.close();
+ }
+ }
+
+ /**
+ * Handles the degenerate case where serialization fails to fit in
+ * the in-memory buffer, so we must spill the record from collect
+ * directly to a spill file. Consider this "losing".
+ */
+ private void spillSingleRecord(final K key, final V value,
+ int partition) throws IOException {
+ long size = kvbuffer.length + partitions * APPROX_HEADER_LENGTH;
+ FSDataOutputStream out = null;
+ try {
+ // create spill file
+ final SpillRecord spillRec = new SpillRecord(partitions);
+ final Path filename =
+ mapOutputFile.getSpillFileForWrite(numSpills, size);
+ out = rfs.create(filename);
+
+ // we don't run the combiner for a single record
+ IndexRecord rec = new IndexRecord();
+ for (int i = 0; i < partitions; ++i) {
+ IFile.Writer<K, V> writer = null;
+ try {
+ long segmentStart = out.getPos();
+ // Create a new codec, don't care!
+ writer = new IFile.Writer<K,V>(job, out, keyClass, valClass, codec,
+ spilledRecordsCounter);
+
+ if (i == partition) {
+ final long recordStart = out.getPos();
+ writer.append(key, value);
+ // Note that our map byte count will not be accurate with
+ // compression
+ mapOutputByteCounter.increment(out.getPos() - recordStart);
+ }
+ writer.close();
+
+ // record offsets
+ rec.startOffset = segmentStart;
+ rec.rawLength = writer.getRawLength();
+ rec.partLength = writer.getCompressedLength();
+ spillRec.putIndex(rec, i);
+
+ writer = null;
+ } catch (IOException e) {
+ if (null != writer) writer.close();
+ throw e;
+ }
+ }
+ if (totalIndexCacheMemory >= indexCacheMemoryLimit) {
+ // create spill index file
+ Path indexFilename =
+ mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions
+ * MAP_OUTPUT_INDEX_RECORD_LENGTH);
+ spillRec.writeToFile(indexFilename, job);
+ } else {
+ indexCacheList.add(spillRec);
+ totalIndexCacheMemory +=
+ spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
+ }
+ ++numSpills;
+ } finally {
+ if (out != null) out.close();
+ }
+ }
+
+ /**
+ * Given an offset, populate vbytes with the associated set of
+ * serialized value bytes. Should only be called during a spill.
+ */
+ private void getVBytesForOffset(int kvoff, InMemValBytes vbytes) {
+ // get the keystart for the next serialized value to be the end
+ // of this value. If this is the last value in the buffer, use bufend
+ final int nextindex = kvoff == kvend
+ ? bufend
+ : kvmeta.get(
+ (kvoff - NMETA + kvmeta.capacity() + KEYSTART) % kvmeta.capacity());
+ // calculate the length of the value
+ int vallen = (nextindex >= kvmeta.get(kvoff + VALSTART))
+ ? nextindex - kvmeta.get(kvoff + VALSTART)
+ : (bufvoid - kvmeta.get(kvoff + VALSTART)) + nextindex;
+ vbytes.reset(kvbuffer, kvmeta.get(kvoff + VALSTART), vallen);
+ }
+
+ /**
+ * Inner class wrapping valuebytes, used for appendRaw.
+ */
+ protected class InMemValBytes extends DataInputBuffer {
+ private byte[] buffer;
+ private int start;
+ private int length;
+
+ public void reset(byte[] buffer, int start, int length) {
+ this.buffer = buffer;
+ this.start = start;
+ this.length = length;
+
+ if (start + length > bufvoid) {
+ this.buffer = new byte[this.length];
+ final int taillen = bufvoid - start;
+ System.arraycopy(buffer, start, this.buffer, 0, taillen);
+ System.arraycopy(buffer, 0, this.buffer, taillen, length-taillen);
+ this.start = 0;
+ }
+
+ super.reset(this.buffer, this.start, this.length);
+ }
+ }
+
+ protected class MRResultIterator implements RawKeyValueIterator {
+ private final DataInputBuffer keybuf = new DataInputBuffer();
+ private final InMemValBytes vbytes = new InMemValBytes();
+ private final int end;
+ private int current;
+ public MRResultIterator(int start, int end) {
+ this.end = end;
+ current = start - 1;
+ }
+ public boolean next() throws IOException {
+ return ++current < end;
+ }
+ public DataInputBuffer getKey() throws IOException {
+ final int kvoff = offsetFor(current % maxRec);
+ keybuf.reset(kvbuffer, kvmeta.get(kvoff + KEYSTART),
+ kvmeta.get(kvoff + VALSTART) - kvmeta.get(kvoff + KEYSTART));
+ return keybuf;
+ }
+ public DataInputBuffer getValue() throws IOException {
+ getVBytesForOffset(offsetFor(current % maxRec), vbytes);
+ return vbytes;
+ }
+ public Progress getProgress() {
+ return null;
+ }
+ public void close() { }
+ }
+
+ private void mergeParts() throws IOException, InterruptedException,
+ ClassNotFoundException {
+ // get the approximate size of the final output/index files
+ long finalOutFileSize = 0;
+ long finalIndexFileSize = 0;
+ final Path[] filename = new Path[numSpills];
+ final TaskAttemptID mapId = getTaskID();
+
+ for(int i = 0; i < numSpills; i++) {
+ filename[i] = mapOutputFile.getSpillFile(i);
+ finalOutFileSize += rfs.getFileStatus(filename[i]).getLen();
+ }
+ if (numSpills == 1) { //the spill is the final output
+ rfs.rename(filename[0],
+ mapOutputFile.getOutputFileForWriteInVolume(filename[0]));
+ if (indexCacheList.size() == 0) {
+ rfs.rename(mapOutputFile.getSpillIndexFile(0),
+ mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]));
+ } else {
+ indexCacheList.get(0).writeToFile(
+ mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]), job);
+ }
+ return;
+ }
+
+ // read in paged indices
+ for (int i = indexCacheList.size(); i < numSpills; ++i) {
+ Path indexFileName = mapOutputFile.getSpillIndexFile(i);
+ indexCacheList.add(new SpillRecord(indexFileName, job));
+ }
+
+ //make correction in the length to include the sequence file header
+ //lengths for each partition
+ finalOutFileSize += partitions * APPROX_HEADER_LENGTH;
+ finalIndexFileSize = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
+ Path finalOutputFile =
+ mapOutputFile.getOutputFileForWrite(finalOutFileSize);
+ Path finalIndexFile =
+ mapOutputFile.getOutputIndexFileForWrite(finalIndexFileSize);
+
+ //The output stream for the final single output file
+ FSDataOutputStream finalOut = rfs.create(finalOutputFile, true, 4096);
+
+ if (numSpills == 0) {
+ //create dummy files
+ IndexRecord rec = new IndexRecord();
+ SpillRecord sr = new SpillRecord(partitions);
+ try {
+ for (int i = 0; i < partitions; i++) {
+ long segmentStart = finalOut.getPos();
+ Writer<K, V> writer =
+ new Writer<K, V>(job, finalOut, keyClass, valClass, codec, null);
+ writer.close();
+ rec.startOffset = segmentStart;
+ rec.rawLength = writer.getRawLength();
+ rec.partLength = writer.getCompressedLength();
+ sr.putIndex(rec, i);
+ }
+ sr.writeToFile(finalIndexFile, job);
+ } finally {
+ finalOut.close();
+ }
+ return;
+ }
+ {
+ sortPhase.addPhases(partitions); // Divide sort phase into sub-phases
+ Merger.considerFinalMergeForProgress();
+
+ IndexRecord rec = new IndexRecord();
+ final SpillRecord spillRec = new SpillRecord(partitions);
+ for (int parts = 0; parts < partitions; parts++) {
+ //create the segments to be merged
+ List<Segment<K,V>> segmentList =
+ new ArrayList<Segment<K, V>>(numSpills);
+ for(int i = 0; i < numSpills; i++) {
+ IndexRecord indexRecord = indexCacheList.get(i).getIndex(parts);
+
+ Segment<K,V> s =
+ new Segment<K,V>(job, rfs, filename[i], indexRecord.startOffset,
+ indexRecord.partLength, codec, true);
+ segmentList.add(i, s);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("MapId=" + mapId + " Reducer=" + parts +
+ "Spill =" + i + "(" + indexRecord.startOffset + "," +
+ indexRecord.rawLength + ", " + indexRecord.partLength + ")");
+ }
+ }
+
+ int mergeFactor = job.getInt(JobContext.IO_SORT_FACTOR, 100);
+ // sort the segments only if there are intermediate merges
+ boolean sortSegments = segmentList.size() > mergeFactor;
+ //merge
+ @SuppressWarnings("unchecked")
+ RawKeyValueIterator kvIter = Merger.merge(job, rfs,
+ keyClass, valClass, codec,
+ segmentList, mergeFactor,
+ new Path(mapId.toString()),
+ job.getOutputKeyComparator(), reporter, sortSegments,
+ null, spilledRecordsCounter, sortPhase.phase());
+
+ //write merged output to disk
+ long segmentStart = finalOut.getPos();
+ Writer<K, V> writer =
+ new Writer<K, V>(job, finalOut, keyClass, valClass, codec,
+ spilledRecordsCounter);
+ if (combinerRunner == null || numSpills < minSpillsForCombine) {
+ Merger.writeFile(kvIter, writer, reporter, job);
+ } else {
+ combineCollector.setWriter(writer);
+ combinerRunner.combine(kvIter, combineCollector);
+ }
+
+ //close
+ writer.close();
+
+ sortPhase.startNextPhase();
+
+ // record offsets
+ rec.startOffset = segmentStart;
+ rec.rawLength = writer.getRawLength();
+ rec.partLength = writer.getCompressedLength();
+ spillRec.putIndex(rec, parts);
+ }
+ spillRec.writeToFile(finalIndexFile, job);
+ finalOut.close();
+ for(int i = 0; i < numSpills; i++) {
+ rfs.delete(filename[i], true);
+ }
+ }
+ }
+
+ } // MapOutputBuffer
+
+ /**
+ * Exception indicating that the allocated sort buffer is insufficient
+ * to hold the current record.
+ */
+ @SuppressWarnings("serial")
+ private static class MapBufferTooSmallException extends IOException {
+ public MapBufferTooSmallException(String s) {
+ super(s);
+ }
+ }
+
+}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapTaskStatus.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTaskStatus.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MapTaskStatus.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTaskStatus.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Mapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Mapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Mapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Mapper.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java
new file mode 100644
index 0000000..e2ab5fe
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java
@@ -0,0 +1,52 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+
+public class Master {
+
+ public enum State {
+ INITIALIZING, RUNNING;
+ }
+
+ public static String getMasterUserName(Configuration conf) {
+ return conf.get(MRConfig.MASTER_USER_NAME);
+ }
+
+ public static InetSocketAddress getMasterAddress(Configuration conf) {
+ String jobTrackerStr =
+ conf.get(MRConfig.MASTER_ADDRESS, "localhost:8012");
+ return NetUtils.createSocketAddr(jobTrackerStr);
+ }
+
+ public static String getMasterPrincipal(Configuration conf)
+ throws IOException {
+ String jtHostname = getMasterAddress(conf).getHostName();
+ // get jobtracker principal for use as delegation token renewer
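+ // getServerPrincipal substitutes any "_HOST" token in the configured
+ // principal with the jobtracker hostname resolved above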
+ return SecurityUtil.getServerPrincipal(getMasterUserName(conf), jtHostname);
+ }
+
+}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MergeSorter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MergeSorter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MergeSorter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MergeSorter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Merger.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Merger.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MultiFileInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MultiFileInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MultiFileInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MultiFileInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MultiFileSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MultiFileSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MultiFileSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MultiFileSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Operation.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Operation.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Operation.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Operation.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/OutputCollector.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCollector.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/OutputCollector.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCollector.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/OutputCommitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCommitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/OutputCommitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCommitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/OutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/OutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/OutputLogFilter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputLogFilter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/OutputLogFilter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputLogFilter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Partitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Partitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Partitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Partitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/PeriodicStatsAccumulator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/PeriodicStatsAccumulator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/PeriodicStatsAccumulator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/PeriodicStatsAccumulator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ProgressSplitsBlock.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ProgressSplitsBlock.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/ProgressSplitsBlock.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ProgressSplitsBlock.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Queue.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Queue.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Queue.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Queue.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/QueueACL.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueACL.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/QueueACL.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueACL.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/QueueAclsInfo.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueAclsInfo.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/QueueAclsInfo.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueAclsInfo.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/QueueConfigurationParser.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueConfigurationParser.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/QueueConfigurationParser.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueConfigurationParser.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueManager.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueManager.java
new file mode 100644
index 0000000..04ca524
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueManager.java
@@ -0,0 +1,703 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.QueueState;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+
+import java.io.BufferedInputStream;
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.Writer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.List;
+import java.net.URL;
+
+
+/**
+ * Class that exposes information about queues maintained by the Hadoop
+ * Map/Reduce framework.
+ * <p/>
+ * The Map/Reduce framework can be configured with one or more queues,
+ * depending on the scheduler it is configured with. While some
+ * schedulers work only with one queue, some schedulers support multiple
+ * queues. Some schedulers also support the notion of queues within
+ * queues - a feature called hierarchical queues.
+ * <p/>
+ * Queue names are unique, and used as a key to lookup queues. Hierarchical
+ * queues are named by a 'fully qualified name' such as q1:q2:q3, where
+ * q2 is a child queue of q1 and q3 is a child queue of q2.
+ * <p/>
+ * Leaf level queues are queues that contain no queues within them. Jobs
+ * can be submitted only to leaf level queues.
+ * <p/>
+ * Queues can be configured with various properties. Some of these
+ * properties are common to all schedulers, and those are handled by this
+ * class. Schedulers might also associate several custom properties with
+ * queues. These properties are parsed and maintained per queue by the
+ * framework. If schedulers need more complicated structure to maintain
+ * configuration per queue, they are free to not use the facilities
+ * provided by the framework, but define their own mechanisms. In such cases,
+ * it is likely that the name of the queue will be used to relate the
+ * common properties of a queue with scheduler specific properties.
+ * <p/>
+ * Information related to a queue, such as its name, properties, scheduling
+ * information and children are exposed by this class via a serializable
+ * class called {@link JobQueueInfo}.
+ * <p/>
+ * Queues are configured in the configuration file mapred-queues.xml.
+ * To support backwards compatibility, queues can also be configured
+ * in mapred-site.xml. However, when configured in the latter, there is
+ * no support for hierarchical queues.
+ */
+@InterfaceAudience.Private
+public class QueueManager {
+
+ private static final Log LOG = LogFactory.getLog(QueueManager.class);
+
+ // Map of a queue name and Queue object
+ private Map<String, Queue> leafQueues = new HashMap<String,Queue>();
+ private Map<String, Queue> allQueues = new HashMap<String, Queue>();
+ public static final String QUEUE_CONF_FILE_NAME = "mapred-queues.xml";
+ static final String QUEUE_CONF_DEFAULT_FILE_NAME = "mapred-queues-default.xml";
+
+ //Prefix in configuration for queue related keys
+ static final String QUEUE_CONF_PROPERTY_NAME_PREFIX = "mapred.queue.";
+
+ //Resource in which queue acls are configured.
+ private Queue root = null;
+
+ // represents if job and queue acls are enabled on the mapreduce cluster
+ private boolean areAclsEnabled = false;
+
+ /**
+ * Factory method to create an appropriate instance of a queue
+ * configuration parser.
+ * <p/>
+ * Returns a parser that can parse either the deprecated property
+ * style queue configuration in mapred-site.xml, or one that can
+ * parse hierarchical queues in mapred-queues.xml. First preference
+ * is given to configuration in mapred-site.xml. If no queue
+ * configuration is found there, then a parser that can parse
+ * configuration in mapred-queues.xml is created.
+ *
+ * @param conf Configuration instance that determines which parser
+ * to use.
+ * @return Queue configuration parser
+ */
+ static QueueConfigurationParser getQueueConfigurationParser(
+ Configuration conf, boolean reloadConf, boolean areAclsEnabled) {
+ if (conf != null && conf.get(
+ DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY) != null) {
+ if (reloadConf) {
+ conf.reloadConfiguration();
+ }
+ return new DeprecatedQueueConfigurationParser(conf);
+ } else {
+ URL xmlInUrl =
+ Thread.currentThread().getContextClassLoader()
+ .getResource(QUEUE_CONF_FILE_NAME);
+ if (xmlInUrl == null) {
+ xmlInUrl = Thread.currentThread().getContextClassLoader()
+ .getResource(QUEUE_CONF_DEFAULT_FILE_NAME);
+ assert xmlInUrl != null; // this should be in our jar
+ }
+ InputStream stream = null;
+ try {
+ stream = xmlInUrl.openStream();
+ return new QueueConfigurationParser(new BufferedInputStream(stream),
+ areAclsEnabled);
+ } catch (IOException ioe) {
+ throw new RuntimeException("Couldn't open queue configuration at " +
+ xmlInUrl, ioe);
+ } finally {
+ IOUtils.closeStream(stream);
+ }
+ }
+ }
+
+ QueueManager() {// acls are disabled
+ this(false);
+ }
+
+ QueueManager(boolean areAclsEnabled) {
+ this.areAclsEnabled = areAclsEnabled;
+ initialize(getQueueConfigurationParser(null, false, areAclsEnabled));
+ }
+
+ /**
+ * Construct a new QueueManager using configuration specified in the passed
+ * in {@link org.apache.hadoop.conf.Configuration} object.
+ * <p/>
+ * This instance supports queue configuration specified in mapred-site.xml,
+ * but without support for hierarchical queues. If no queue configuration
+ * is found in mapred-site.xml, it will then look for site configuration
+ * in mapred-queues.xml supporting hierarchical queues.
+ *
+ * @param clusterConf mapreduce cluster configuration
+ */
+ public QueueManager(Configuration clusterConf) {
+ areAclsEnabled = clusterConf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
+ initialize(getQueueConfigurationParser(clusterConf, false, areAclsEnabled));
+ }
+
+ /**
+ * Create an instance that supports hierarchical queues, defined in
+ * the passed in configuration file.
+ * <p/>
+ * This is mainly used for testing purposes and should not be called from
+ * production code.
+ *
+ * @param confFile File where the queue configuration is found.
+ */
+ QueueManager(String confFile, boolean areAclsEnabled) {
+ this.areAclsEnabled = areAclsEnabled;
+ QueueConfigurationParser cp =
+ new QueueConfigurationParser(confFile, areAclsEnabled);
+ initialize(cp);
+ }
+
+ /**
+ * Initialize the queue-manager with the queue hierarchy specified by the
+ * given {@link QueueConfigurationParser}.
+ *
+ * @param cp
+ */
+ private void initialize(QueueConfigurationParser cp) {
+ this.root = cp.getRoot();
+ leafQueues.clear();
+ allQueues.clear();
+ //At this point we have root populated
+ //update data structures leafNodes.
+ leafQueues = getRoot().getLeafQueues();
+ allQueues.putAll(getRoot().getInnerQueues());
+ allQueues.putAll(leafQueues);
+
+ LOG.info("AllQueues : " + allQueues + "; LeafQueues : " + leafQueues);
+ }
+
+ /**
+ * Return the set of leaf level queues configured in the system to
+ * which jobs are submitted.
+ * <p/>
+ * The number of queues configured depends on the scheduler in use. Note
+ * that some schedulers work with only one queue, whereas others can
+ * support multiple queues.
+ *
+ * @return Set of queue names.
+ */
+ public synchronized Set<String> getLeafQueueNames() {
+ return leafQueues.keySet();
+ }
+
+ /**
+ * Return true if the given user is part of the ACL for the given
+ * {@link QueueACL} name for the given queue.
+ * <p/>
+ * An operation is allowed if all users are provided access for this
+ * operation, or if either the user or any of the groups specified is
+ * provided access.
+ *
+ * @param queueName Queue on which the operation needs to be performed.
+ * @param qACL The queue ACL name to be checked
+ * @param ugi The user and groups who wish to perform the operation.
+ * @return true if the operation is allowed, false otherwise.
+ */
+ public synchronized boolean hasAccess(
+ String queueName, QueueACL qACL, UserGroupInformation ugi) {
+
+ Queue q = leafQueues.get(queueName);
+
+ if (q == null) {
+ LOG.info("Queue " + queueName + " is not present");
+ return false;
+ }
+
+ if (q.getChildren() != null && !q.getChildren().isEmpty()) {
+ LOG.info("Cannot submit job to parent queue " + q.getName());
+ return false;
+ }
+
+ if (!areAclsEnabled()) {
+ return true;
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Checking access for the acl " + toFullPropertyName(queueName,
+ qACL.getAclName()) + " for user " + ugi.getShortUserName());
+ }
+
+ AccessControlList acl = q.getAcls().get(
+ toFullPropertyName(queueName, qACL.getAclName()));
+ if (acl == null) {
+ return false;
+ }
+
+ // Check if user is part of the ACL
+ return acl.isUserAllowed(ugi);
+ }
+
+ /**
+ * Checks whether the given queue is running or not.
+ *
+ * @param queueName name of the queue
+ * @return true, if the queue is running.
+ */
+ synchronized boolean isRunning(String queueName) {
+ Queue q = leafQueues.get(queueName);
+ if (q != null) {
+ return q.getState().equals(QueueState.RUNNING);
+ }
+ return false;
+ }
+
+ /**
+ * Set a generic Object that represents scheduling information relevant
+ * to a queue.
+ * <p/>
+ * A string representation of this Object will be used by the framework
+ * to display in user facing applications like the JobTracker web UI and
+ * the hadoop CLI.
+ *
+ * @param queueName queue for which the scheduling information is to be set.
+ * @param queueInfo scheduling information for this queue.
+ */
+ public synchronized void setSchedulerInfo(
+ String queueName,
+ Object queueInfo) {
+ if (allQueues.get(queueName) != null) {
+ allQueues.get(queueName).setSchedulingInfo(queueInfo);
+ }
+ }
+
+ /**
+ * Return the scheduler information configured for this queue.
+ *
+ * @param queueName queue for which the scheduling information is required.
+ * @return The scheduling information for this queue.
+ */
+ public synchronized Object getSchedulerInfo(String queueName) {
+ if (allQueues.get(queueName) != null) {
+ return allQueues.get(queueName).getSchedulingInfo();
+ }
+ return null;
+ }
+
+ static final String MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY =
+ "Unable to refresh queues because queue-hierarchy changed. "
+ + "Retaining existing configuration. ";
+
+ static final String MSG_REFRESH_FAILURE_WITH_SCHEDULER_FAILURE =
+ "Scheduler couldn't refresh it's queues with the new"
+ + " configuration properties. "
+ + "Retaining existing configuration throughout the system.";
+
+ /**
+ * Refresh acls, state and scheduler properties for the configured queues.
+ * <p/>
+ * This method reloads configuration related to queues, but does not
+ * support changes to the list of queues or hierarchy. The expected usage
+ * is that an administrator can modify the queue configuration file and
+ * fire an admin command to reload queue configuration. If there is a
+ * problem in reloading configuration, then this method guarantees that
+ * existing queue configuration is untouched and in a consistent state.
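+ * <p/>
+ * A hypothetical admin-side sketch (the conf source and the refresher are
+ * illustrative assumptions):
+ * <pre>{@code
+ * try {
+ *   queueManager.refreshQueues(conf, scheduler.getQueueRefresher());
+ * } catch (IOException ioe) {
+ *   // the previous queue configuration remains in effect
+ * }
+ * }</pre>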
+ *
+ * @param schedulerRefresher
+ * @throws IOException when queue configuration file is invalid.
+ */
+ synchronized void refreshQueues(Configuration conf,
+ QueueRefresher schedulerRefresher)
+ throws IOException {
+
+ // Create a new configuration parser using the passed conf object.
+ QueueConfigurationParser cp =
+ getQueueConfigurationParser(conf, true, areAclsEnabled);
+
+ /*
+ * (1) Validate the refresh of properties owned by QueueManager. As of now,
+ * while refreshing queue properties (ACLs and state for each queue), we
+ * only check that the hierarchy is the same w.r.t. queue names, and don't
+ * support adding new queues or removing old queues.
+ */
+ if (!root.isHierarchySameAs(cp.getRoot())) {
+ LOG.warn(MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY);
+ throw new IOException(MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY);
+ }
+
+ /*
+ * (2) QueueManager-owned properties are validated. Now validate and
+ * refresh the scheduler's properties in a single step.
+ */
+ if (schedulerRefresher != null) {
+ try {
+ schedulerRefresher.refreshQueues(cp.getRoot().getJobQueueInfo().getChildren());
+ } catch (Throwable e) {
+ StringBuilder msg =
+ new StringBuilder(
+ "Scheduler's refresh-queues failed with the exception : "
+ + StringUtils.stringifyException(e));
+ msg.append("\n");
+ msg.append(MSG_REFRESH_FAILURE_WITH_SCHEDULER_FAILURE);
+ LOG.error(msg.toString());
+ throw new IOException(msg.toString());
+ }
+ }
+
+ /*
+ * (3) Scheduler has validated and refreshed its queues successfully, now
+ * refresh the properties owned by QueueManager
+ */
+
+ // First copy the scheduling information recursively into the new
+ // queue-hierarchy. This is done to retain old scheduling information. This
+ // is done after scheduler refresh and not before it because during refresh,
+ // schedulers may wish to change their scheduling info objects too.
+ cp.getRoot().copySchedulingInfo(this.root);
+
+ // Now switch roots.
+ initialize(cp);
+
+ LOG.info("Queue configuration is refreshed successfully.");
+ }
+
+ // this method is for internal use only
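+ // e.g., assuming the configured prefix is "mapred.queue.",
+ // toFullPropertyName("default", "acl-submit-job") would return
+ // "mapred.queue.default.acl-submit-job".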
+ public static final String toFullPropertyName(
+ String queue,
+ String property) {
+ return QUEUE_CONF_PROPERTY_NAME_PREFIX + queue + "." + property;
+ }
+
+ /**
+ * Return an array of {@link JobQueueInfo} objects for all the
+ * queues configured in the system.
+ *
+ * @return array of JobQueueInfo objects.
+ */
+ synchronized JobQueueInfo[] getJobQueueInfos() {
+ ArrayList<JobQueueInfo> queueInfoList = new ArrayList<JobQueueInfo>();
+ for (String queue : allQueues.keySet()) {
+ JobQueueInfo queueInfo = getJobQueueInfo(queue);
+ if (queueInfo != null) {
+ queueInfoList.add(queueInfo);
+ }
+ }
+ return queueInfoList.toArray(
+ new JobQueueInfo[queueInfoList.size()]);
+ }
+
+
+ /**
+ * Return {@link JobQueueInfo} for a given queue.
+ *
+ * @param queue name of the queue
+ * @return JobQueueInfo for the queue, null if the queue is not found.
+ */
+ synchronized JobQueueInfo getJobQueueInfo(String queue) {
+ if (allQueues.containsKey(queue)) {
+ return allQueues.get(queue).getJobQueueInfo();
+ }
+
+ return null;
+ }
+
+ /**
+ * JobQueueInfo for all the queues.
+ * <p/>
+ * Contrib schedulers can use this data structure to create or traverse the
+ * queue hierarchy. They can also use it to refresh queue properties during
+ * refreshQueues.
+ *
+ * @return a map for easy navigation.
+ */
+ synchronized Map<String, JobQueueInfo> getJobQueueInfoMapping() {
+ Map<String, JobQueueInfo> m = new HashMap<String, JobQueueInfo>();
+
+ for (String key : allQueues.keySet()) {
+ m.put(key, allQueues.get(key).getJobQueueInfo());
+ }
+
+ return m;
+ }
+
+ /**
+ * Generates the array of QueueAclsInfo object.
+ * <p/>
+ * The array consists of only those queues for which the user has ACLs.
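+ * <p/>
+ * A hypothetical usage sketch (the manager and ugi objects are
+ * illustrative assumptions):
+ * <pre>{@code
+ * for (QueueAclsInfo info : queueManager.getQueueAcls(ugi)) {
+ *   System.out.println(info.getQueueName() + " -> "
+ *       + Arrays.toString(info.getOperations()));
+ * }
+ * }</pre>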
+ *
+ * @return QueueAclsInfo[]
+ * @throws java.io.IOException
+ */
+ synchronized QueueAclsInfo[] getQueueAcls(UserGroupInformation ugi)
+ throws IOException {
+ // List of all QueueAclsInfo objects; this list is returned.
+ ArrayList<QueueAclsInfo> queueAclsInfolist =
+ new ArrayList<QueueAclsInfo>();
+ QueueACL[] qAcls = QueueACL.values();
+ for (String queueName : leafQueues.keySet()) {
+ QueueAclsInfo queueAclsInfo = null;
+ ArrayList<String> operationsAllowed = null;
+ for (QueueACL qAcl : qAcls) {
+ if (hasAccess(queueName, qAcl, ugi)) {
+ if (operationsAllowed == null) {
+ operationsAllowed = new ArrayList<String>();
+ }
+ operationsAllowed.add(qAcl.getAclName());
+ }
+ }
+ if (operationsAllowed != null) {
+ // There is at least 1 operation supported for queue <queueName>,
+ // hence initialize queueAclsInfo
+ queueAclsInfo = new QueueAclsInfo(
+ queueName, operationsAllowed.toArray
+ (new String[operationsAllowed.size()]));
+ queueAclsInfolist.add(queueAclsInfo);
+ }
+ }
+ return queueAclsInfolist.toArray(
+ new QueueAclsInfo[queueAclsInfolist.size()]);
+ }
+
+ /**
+ * ONLY FOR TESTING - Do not use in production code.
+ * This method is used only for setting up leafQueues;
+ * the hierarchy is not set up here.
+ *
+ * @param queues the queues to install as children of the root
+ */
+ synchronized void setQueues(Queue[] queues) {
+ root.getChildren().clear();
+ leafQueues.clear();
+ allQueues.clear();
+
+ for (Queue queue : queues) {
+ root.addChild(queue);
+ }
+ // At this point the root is populated;
+ // update the leafQueues and allQueues data structures.
+ leafQueues = getRoot().getLeafQueues();
+ allQueues.putAll(getRoot().getInnerQueues());
+ allQueues.putAll(leafQueues);
+ }
+
+ /**
+ * Return an array of {@link JobQueueInfo} objects for the root
+ * queues configured in the system.
+ * <p/>
+ * Root queues are queues that are at the top-most level in the
+ * hierarchy of queues in mapred-queues.xml, or they are the queues
+ * configured in the mapred.queue.names key in mapred-site.xml.
+ *
+ * @return array of JobQueueInfo objects for root level queues.
+ */
+
+ JobQueueInfo[] getRootQueues() {
+ List<JobQueueInfo> list = getRoot().getJobQueueInfo().getChildren();
+ return list.toArray(new JobQueueInfo[list.size()]);
+ }
+
+ /**
+ * Get the complete hierarchy of children for the queue
+ * queueName.
+ *
+ * @param queueName name of the queue
+ * @return array of JobQueueInfo objects for the queue's children; an empty
+ * array if the queue has no children.
+ */
+ JobQueueInfo[] getChildQueues(String queueName) {
+ List<JobQueueInfo> list =
+ allQueues.get(queueName).getJobQueueInfo().getChildren();
+ if (list != null) {
+ return list.toArray(new JobQueueInfo[list.size()]);
+ } else {
+ return new JobQueueInfo[0];
+ }
+ }
+
+ /**
+ * Used only for testing purposes.
+ * This method is unstable, as refreshQueues can leave this
+ * data structure in an inconsistent state.
+ *
+ * @param queueName name of the queue
+ * @return the Queue object for the given name, or null if not present
+ */
+ Queue getQueue(String queueName) {
+ return this.allQueues.get(queueName);
+ }
+
+
+ /**
+ * Return whether ACLs are enabled for the Map/Reduce system.
+ *
+ * @return true if ACLs are enabled.
+ */
+ boolean areAclsEnabled() {
+ return areAclsEnabled;
+ }
+
+ /**
+ * Used only for testing.
+ *
+ * @return the root queue of the queue hierarchy
+ */
+ Queue getRoot() {
+ return root;
+ }
+
+ /**
+ * Returns the specific queue ACL for the given queue.
+ * Returns null if the given queue does not exist or the acl is not
+ * configured for that queue.
+ * If ACLs are disabled (mapreduce.cluster.acls.enabled set to false), returns
+ * an ACL that allows all users.
+ */
+ synchronized AccessControlList getQueueACL(String queueName,
+ QueueACL qACL) {
+ if (areAclsEnabled) {
+ Queue q = leafQueues.get(queueName);
+ if (q != null) {
+ return q.getAcls().get(toFullPropertyName(
+ queueName, qACL.getAclName()));
+ }
+ else {
+ LOG.warn("Queue " + queueName + " is not present.");
+ return null;
+ }
+ }
+ return new AccessControlList("*");
+ }
+
+ /**
+ * Dumps the configuration of the hierarchy of queues in JSON format.
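+ * <p/>
+ * A sketch of the JSON shape produced by this dump (the values shown are
+ * illustrative):
+ * <pre>{@code
+ * {"queues":[{"name":"default","state":"running",
+ * "acl_submit_job":" ","acl_administer_jobs":" ",
+ * "properties":[],"children":[]}]}
+ * }</pre>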
+ * @param out the writer object to which the dump is written
+ * @param conf the configuration; if it contains the deprecated
+ * mapred.queue.names key, nothing is dumped
+ * @throws IOException
+ */
+ static void dumpConfiguration(Writer out, Configuration conf) throws IOException {
+ dumpConfiguration(out, null, conf);
+ }
+
+ /**
+ * Dumps the configuration of the hierarchy of queues from
+ * the given xml file path. It is to be used directly ONLY FOR TESTING.
+ * @param out the writer object to which the dump is written
+ * @param configFile the filename of the xml file
+ * @throws IOException
+ */
+ static void dumpConfiguration(Writer out, String configFile,
+ Configuration conf) throws IOException {
+ if (conf != null && conf.get(DeprecatedQueueConfigurationParser.
+ MAPRED_QUEUE_NAMES_KEY) != null) {
+ return;
+ }
+
+ JsonFactory dumpFactory = new JsonFactory();
+ JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
+ QueueConfigurationParser parser;
+ boolean aclsEnabled = false;
+ if (conf != null) {
+ aclsEnabled = conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
+ }
+ if (configFile != null && !"".equals(configFile)) {
+ parser = new QueueConfigurationParser(configFile, aclsEnabled);
+ }
+ else {
+ parser = getQueueConfigurationParser(null, false, aclsEnabled);
+ }
+ dumpGenerator.writeStartObject();
+ dumpGenerator.writeFieldName("queues");
+ dumpGenerator.writeStartArray();
+ dumpConfiguration(dumpGenerator, parser.getRoot().getChildren());
+ dumpGenerator.writeEndArray();
+ dumpGenerator.writeEndObject();
+ dumpGenerator.flush();
+ }
+
+ /**
+ * Method that performs a depth-first traversal and writes the parameters of
+ * every queue in JSON format.
+ * @param dumpGenerator JsonGenerator object which takes the dump and flushes
+ * to a writer object
+ * @param rootQueues the top-level queues
+ * @throws JsonGenerationException
+ * @throws IOException
+ */
+ private static void dumpConfiguration(JsonGenerator dumpGenerator,
+ Set<Queue> rootQueues) throws JsonGenerationException, IOException {
+ for (Queue queue : rootQueues) {
+ dumpGenerator.writeStartObject();
+ dumpGenerator.writeStringField("name", queue.getName());
+ dumpGenerator.writeStringField("state", queue.getState().toString());
+ AccessControlList submitJobList = null;
+ AccessControlList administerJobsList = null;
+ if (queue.getAcls() != null) {
+ submitJobList =
+ queue.getAcls().get(toFullPropertyName(queue.getName(),
+ QueueACL.SUBMIT_JOB.getAclName()));
+ administerJobsList =
+ queue.getAcls().get(toFullPropertyName(queue.getName(),
+ QueueACL.ADMINISTER_JOBS.getAclName()));
+ }
+ String aclsSubmitJobValue = " ";
+ if (submitJobList != null ) {
+ aclsSubmitJobValue = submitJobList.getAclString();
+ }
+ dumpGenerator.writeStringField("acl_submit_job", aclsSubmitJobValue);
+ String aclsAdministerValue = " ";
+ if (administerJobsList != null) {
+ aclsAdministerValue = administerJobsList.getAclString();
+ }
+ dumpGenerator.writeStringField("acl_administer_jobs",
+ aclsAdministerValue);
+ dumpGenerator.writeFieldName("properties");
+ dumpGenerator.writeStartArray();
+ if (queue.getProperties() != null) {
+ for (Map.Entry<Object, Object> property :
+ queue.getProperties().entrySet()) {
+ dumpGenerator.writeStartObject();
+ dumpGenerator.writeStringField("key", (String)property.getKey());
+ dumpGenerator.writeStringField("value", (String)property.getValue());
+ dumpGenerator.writeEndObject();
+ }
+ }
+ dumpGenerator.writeEndArray();
+ Set<Queue> childQueues = queue.getChildren();
+ dumpGenerator.writeFieldName("children");
+ dumpGenerator.writeStartArray();
+ if (childQueues != null && childQueues.size() > 0) {
+ dumpConfiguration(dumpGenerator, childQueues);
+ }
+ dumpGenerator.writeEndArray();
+ dumpGenerator.writeEndObject();
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueRefresher.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueRefresher.java
new file mode 100644
index 0000000..50f724c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueRefresher.java
@@ -0,0 +1,49 @@
+package org.apache.hadoop.mapred;
+
+import java.util.List;
+
+/**
+ * Abstract QueueRefresher class. Schedulers can extend this and return an
+ * instance of it from the {@link TaskScheduler#getQueueRefresher()} method. The
+ * {@link #refreshQueues(List)} method of this instance will be invoked by the
+ * {@link QueueManager} whenever it gets a request from an administrator to
+ * refresh its own queue-configuration. This method has a documented contract
+ * between the {@link QueueManager} and the {@link TaskScheduler}.
+ *
+ * Before calling QueueRefresher, the caller must hold the lock to the
+ * corresponding {@link TaskScheduler} (generally in the {@link JobTracker}).
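+ *
+ * <p>A hypothetical scheduler-side sketch (the class name and its internals
+ * are illustrative assumptions, not part of this patch):</p>
+ * <pre>{@code
+ * class MyQueueRefresher extends QueueRefresher {
+ *   void refreshQueues(List<JobQueueInfo> newRootQueues) throws Throwable {
+ *     // validate scheduler-specific properties from newRootQueues first,
+ *     // then apply them atomically; throwing here makes QueueManager
+ *     // retain the old configuration.
+ *   }
+ * }
+ * }</pre>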
+ */
+abstract class QueueRefresher {
+
+ /**
+ * Refresh the queue-configuration in the scheduler. This method has the
+ * following contract.
+ * <ol>
+ * <li>Before this method, {@link QueueManager} does a validation of the new
+ * queue-configuration. For example, currently the addition of new queues, or
+ * the removal of queues at any level in the hierarchy, is not supported by
+ * {@link QueueManager} and so is not supported for schedulers either.</li>
+ * <li>Schedulers will be passed a list of {@link JobQueueInfo}s of the root
+ * queues i.e. the queues at the top level. All the descendants are properly
+ * linked from these top-level queues.</li>
+ * <li>Schedulers should use the scheduler specific queue properties from
+ * the newRootQueues, validate the properties themselves and apply them
+ * internally.</li>
+ * <li>
+ * Once this method returns successfully from the scheduler, it is assumed
+ * that the refresh of queue properties has succeeded throughout and will be
+ * 'committed' internally to {@link QueueManager} too. It is guaranteed that
+ * after a successful return from the scheduler, the queue refresh in
+ * QueueManager cannot fail. If such an abnormality ever happens, the
+ * queue framework will be inconsistent and will need a JT restart.</li>
+ * <li>If the scheduler throws an exception during {@link #refreshQueues(List)},
+ * {@link QueueManager} throws away the newly read configuration, retains
+ * the old (consistent) configuration and informs the request issuer about
+ * the error appropriately.</li>
+ * </ol>
+ *
+ * @param newRootQueues the root {@link JobQueueInfo}s of the new hierarchy
+ */
+ abstract void refreshQueues(List<JobQueueInfo> newRootQueues)
+ throws Throwable;
+}
\ No newline at end of file
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/RamManager.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RamManager.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/RamManager.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RamManager.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/RawKeyValueIterator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RawKeyValueIterator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/RawKeyValueIterator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RawKeyValueIterator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/RecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/RecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/RecordWriter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordWriter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/RecordWriter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordWriter.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
new file mode 100644
index 0000000..a65e1af
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
@@ -0,0 +1,618 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskCounter;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
+import org.apache.hadoop.mapreduce.task.reduce.Shuffle;
+import org.apache.hadoop.util.Progress;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/** A Reduce task. */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class ReduceTask extends Task {
+
+ static { // register a ctor
+ WritableFactories.setFactory
+ (ReduceTask.class,
+ new WritableFactory() {
+ public Writable newInstance() { return new ReduceTask(); }
+ });
+ }
+
+ private static final Log LOG = LogFactory.getLog(ReduceTask.class.getName());
+ private int numMaps;
+
+ private CompressionCodec codec;
+
+
+ {
+ getProgress().setStatus("reduce");
+ setPhase(TaskStatus.Phase.SHUFFLE); // phase to start with
+ }
+
+ private Progress copyPhase;
+ private Progress sortPhase;
+ private Progress reducePhase;
+ private Counters.Counter shuffledMapsCounter =
+ getCounters().findCounter(TaskCounter.SHUFFLED_MAPS);
+ private Counters.Counter reduceShuffleBytes =
+ getCounters().findCounter(TaskCounter.REDUCE_SHUFFLE_BYTES);
+ private Counters.Counter reduceInputKeyCounter =
+ getCounters().findCounter(TaskCounter.REDUCE_INPUT_GROUPS);
+ private Counters.Counter reduceInputValueCounter =
+ getCounters().findCounter(TaskCounter.REDUCE_INPUT_RECORDS);
+ private Counters.Counter reduceOutputCounter =
+ getCounters().findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS);
+ private Counters.Counter reduceCombineInputCounter =
+ getCounters().findCounter(TaskCounter.COMBINE_INPUT_RECORDS);
+ private Counters.Counter reduceCombineOutputCounter =
+ getCounters().findCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
+ private Counters.Counter fileOutputByteCounter =
+ getCounters().findCounter(FileOutputFormatCounter.BYTES_WRITTEN);
+
+ // A custom comparator for map output files. Here the ordering is determined
+ // by the file's size and path. For files with the same size but different
+ // paths, the first argument is considered smaller than the second one.
+ // Files with the same size and path are considered equal.
+ private Comparator<FileStatus> mapOutputFileComparator =
+ new Comparator<FileStatus>() {
+ public int compare(FileStatus a, FileStatus b) {
+ if (a.getLen() < b.getLen())
+ return -1;
+ else if (a.getLen() == b.getLen())
+ if (a.getPath().toString().equals(b.getPath().toString()))
+ return 0;
+ else
+ return -1;
+ else
+ return 1;
+ }
+ };
+
+ // A sorted set for keeping a set of map output files on disk
+ private final SortedSet<FileStatus> mapOutputFilesOnDisk =
+ new TreeSet<FileStatus>(mapOutputFileComparator);
+
+ public ReduceTask() {
+ super();
+ }
+
+ public ReduceTask(String jobFile, TaskAttemptID taskId,
+ int partition, int numMaps, int numSlotsRequired) {
+ super(jobFile, taskId, partition, numSlotsRequired);
+ this.numMaps = numMaps;
+ }
+
+ private CompressionCodec initCodec() {
+ // check if map-outputs are to be compressed
+ if (conf.getCompressMapOutput()) {
+ Class<? extends CompressionCodec> codecClass =
+ conf.getMapOutputCompressorClass(DefaultCodec.class);
+ return ReflectionUtils.newInstance(codecClass, conf);
+ }
+
+ return null;
+ }
+
+ @Override
+ public boolean isMapTask() {
+ return false;
+ }
+
+ public int getNumMaps() { return numMaps; }
+
+ /**
+ * Localize the given JobConf to be specific for this task.
+ */
+ @Override
+ public void localizeConfiguration(JobConf conf) throws IOException {
+ super.localizeConfiguration(conf);
+ conf.setNumMapTasks(numMaps);
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+
+ out.writeInt(numMaps); // write the number of maps
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+
+ numMaps = in.readInt();
+ }
+
+ // Get the input files for the reducer.
+ private Path[] getMapFiles(FileSystem fs, boolean isLocal)
+ throws IOException {
+ List<Path> fileList = new ArrayList<Path>();
+ if (isLocal) {
+ // for local jobs
+ for(int i = 0; i < numMaps; ++i) {
+ fileList.add(mapOutputFile.getInputFile(i));
+ }
+ } else {
+ // for non local jobs
+ for (FileStatus filestatus : mapOutputFilesOnDisk) {
+ fileList.add(filestatus.getPath());
+ }
+ }
+ return fileList.toArray(new Path[0]);
+ }
+
+ private class ReduceValuesIterator<KEY,VALUE>
+ extends ValuesIterator<KEY,VALUE> {
+ public ReduceValuesIterator (RawKeyValueIterator in,
+ RawComparator<KEY> comparator,
+ Class<KEY> keyClass,
+ Class<VALUE> valClass,
+ Configuration conf, Progressable reporter)
+ throws IOException {
+ super(in, comparator, keyClass, valClass, conf, reporter);
+ }
+
+ @Override
+ public VALUE next() {
+ reduceInputValueCounter.increment(1);
+ return moveToNext();
+ }
+
+ protected VALUE moveToNext() {
+ return super.next();
+ }
+
+ public void informReduceProgress() {
+ reducePhase.set(super.in.getProgress().getProgress()); // update progress
+ reporter.progress();
+ }
+ }
+
+ private class SkippingReduceValuesIterator<KEY,VALUE>
+ extends ReduceValuesIterator<KEY,VALUE> {
+ private SkipRangeIterator skipIt;
+ private TaskUmbilicalProtocol umbilical;
+ private Counters.Counter skipGroupCounter;
+ private Counters.Counter skipRecCounter;
+ private long grpIndex = -1;
+ private Class<KEY> keyClass;
+ private Class<VALUE> valClass;
+ private SequenceFile.Writer skipWriter;
+ private boolean toWriteSkipRecs;
+ private boolean hasNext;
+ private TaskReporter reporter;
+
+ public SkippingReduceValuesIterator(RawKeyValueIterator in,
+ RawComparator<KEY> comparator, Class<KEY> keyClass,
+ Class<VALUE> valClass, Configuration conf, TaskReporter reporter,
+ TaskUmbilicalProtocol umbilical) throws IOException {
+ super(in, comparator, keyClass, valClass, conf, reporter);
+ this.umbilical = umbilical;
+ this.skipGroupCounter =
+ reporter.getCounter(TaskCounter.REDUCE_SKIPPED_GROUPS);
+ this.skipRecCounter =
+ reporter.getCounter(TaskCounter.REDUCE_SKIPPED_RECORDS);
+ this.toWriteSkipRecs = toWriteSkipRecs() &&
+ SkipBadRecords.getSkipOutputPath(conf)!=null;
+ this.keyClass = keyClass;
+ this.valClass = valClass;
+ this.reporter = reporter;
+ skipIt = getSkipRanges().skipRangeIterator();
+ mayBeSkip();
+ }
+
+ public void nextKey() throws IOException {
+ super.nextKey();
+ mayBeSkip();
+ }
+
+ public boolean more() {
+ return super.more() && hasNext;
+ }
+
+ private void mayBeSkip() throws IOException {
+ hasNext = skipIt.hasNext();
+ if(!hasNext) {
+ LOG.warn("Further groups got skipped.");
+ return;
+ }
+ grpIndex++;
+ long nextGrpIndex = skipIt.next();
+ long skip = 0;
+ long skipRec = 0;
+ while(grpIndex<nextGrpIndex && super.more()) {
+ while (hasNext()) {
+ VALUE value = moveToNext();
+ if(toWriteSkipRecs) {
+ writeSkippedRec(getKey(), value);
+ }
+ skipRec++;
+ }
+ super.nextKey();
+ grpIndex++;
+ skip++;
+ }
+
+ //close the skip writer once all the ranges are skipped
+ if(skip>0 && skipIt.skippedAllRanges() && skipWriter!=null) {
+ skipWriter.close();
+ }
+ skipGroupCounter.increment(skip);
+ skipRecCounter.increment(skipRec);
+ reportNextRecordRange(umbilical, grpIndex);
+ }
+
+ @SuppressWarnings("unchecked")
+ private void writeSkippedRec(KEY key, VALUE value) throws IOException{
+ if(skipWriter==null) {
+ Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
+ Path skipFile = new Path(skipDir, getTaskID().toString());
+ skipWriter = SequenceFile.createWriter(
+ skipFile.getFileSystem(conf), conf, skipFile,
+ keyClass, valClass,
+ CompressionType.BLOCK, reporter);
+ }
+ skipWriter.append(key, value);
+ }
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void run(JobConf job, final TaskUmbilicalProtocol umbilical)
+ throws IOException, InterruptedException, ClassNotFoundException {
+ job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
+
+ if (isMapOrReduce()) {
+ copyPhase = getProgress().addPhase("copy");
+ sortPhase = getProgress().addPhase("sort");
+ reducePhase = getProgress().addPhase("reduce");
+ }
+ // start thread that will handle communication with parent
+ TaskReporter reporter = startReporter(umbilical);
+
+ boolean useNewApi = job.getUseNewReducer();
+ initialize(job, getJobID(), reporter, useNewApi);
+
+ // check if it is a cleanupJobTask
+ if (jobCleanup) {
+ runJobCleanupTask(umbilical, reporter);
+ return;
+ }
+ if (jobSetup) {
+ runJobSetupTask(umbilical, reporter);
+ return;
+ }
+ if (taskCleanup) {
+ runTaskCleanupTask(umbilical, reporter);
+ return;
+ }
+
+ // Initialize the codec
+ codec = initCodec();
+ RawKeyValueIterator rIter = null;
+ boolean isLocal = "local".equals(job.get(MRConfig.MASTER_ADDRESS, "local"));
+ if (!isLocal) {
+ Class combinerClass = conf.getCombinerClass();
+ CombineOutputCollector combineCollector =
+ (null != combinerClass) ?
+ new CombineOutputCollector(reduceCombineOutputCounter, reporter, conf) : null;
+
+ Shuffle shuffle =
+ new Shuffle(getTaskID(), job, FileSystem.getLocal(job), umbilical,
+ super.lDirAlloc, reporter, codec,
+ combinerClass, combineCollector,
+ spilledRecordsCounter, reduceCombineInputCounter,
+ shuffledMapsCounter,
+ reduceShuffleBytes, failedShuffleCounter,
+ mergedMapOutputsCounter,
+ taskStatus, copyPhase, sortPhase, this,
+ mapOutputFile);
+ rIter = shuffle.run();
+ } else {
+ // local job runner doesn't have a copy phase
+ copyPhase.complete();
+ final FileSystem rfs = FileSystem.getLocal(job).getRaw();
+ rIter = Merger.merge(job, rfs, job.getMapOutputKeyClass(),
+ job.getMapOutputValueClass(), codec,
+ getMapFiles(rfs, true),
+ !conf.getKeepFailedTaskFiles(),
+ job.getInt(JobContext.IO_SORT_FACTOR, 100),
+ new Path(getTaskID().toString()),
+ job.getOutputKeyComparator(),
+ reporter, spilledRecordsCounter, null, null);
+ }
+ // free up the data structures
+ mapOutputFilesOnDisk.clear();
+
+ sortPhase.complete(); // sort is complete
+ setPhase(TaskStatus.Phase.REDUCE);
+ statusUpdate(umbilical);
+ Class keyClass = job.getMapOutputKeyClass();
+ Class valueClass = job.getMapOutputValueClass();
+ RawComparator comparator = job.getOutputValueGroupingComparator();
+
+ if (useNewApi) {
+ runNewReducer(job, umbilical, reporter, rIter, comparator,
+ keyClass, valueClass);
+ } else {
+ runOldReducer(job, umbilical, reporter, rIter, comparator,
+ keyClass, valueClass);
+ }
+ done(umbilical, reporter);
+ }
+
+ @SuppressWarnings("unchecked")
+ private <INKEY,INVALUE,OUTKEY,OUTVALUE>
+ void runOldReducer(JobConf job,
+ TaskUmbilicalProtocol umbilical,
+ final TaskReporter reporter,
+ RawKeyValueIterator rIter,
+ RawComparator<INKEY> comparator,
+ Class<INKEY> keyClass,
+ Class<INVALUE> valueClass) throws IOException {
+ Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer =
+ ReflectionUtils.newInstance(job.getReducerClass(), job);
+ // make output collector
+ String finalName = getOutputName(getPartition());
+
+ final RecordWriter<OUTKEY, OUTVALUE> out = new OldTrackingRecordWriter<OUTKEY, OUTVALUE>(
+ this, job, reporter, finalName);
+
+ OutputCollector<OUTKEY,OUTVALUE> collector =
+ new OutputCollector<OUTKEY,OUTVALUE>() {
+ public void collect(OUTKEY key, OUTVALUE value)
+ throws IOException {
+ out.write(key, value);
+ // indicate that progress update needs to be sent
+ reporter.progress();
+ }
+ };
+
+ // apply reduce function
+ try {
+ //increment processed counter only if skipping feature is enabled
+ boolean incrProcCount = SkipBadRecords.getReducerMaxSkipGroups(job)>0 &&
+ SkipBadRecords.getAutoIncrReducerProcCount(job);
+
+ ReduceValuesIterator<INKEY,INVALUE> values = isSkipping() ?
+ new SkippingReduceValuesIterator<INKEY,INVALUE>(rIter,
+ comparator, keyClass, valueClass,
+ job, reporter, umbilical) :
+ new ReduceValuesIterator<INKEY,INVALUE>(rIter,
+ job.getOutputValueGroupingComparator(), keyClass, valueClass,
+ job, reporter);
+ values.informReduceProgress();
+ while (values.more()) {
+ reduceInputKeyCounter.increment(1);
+ reducer.reduce(values.getKey(), values, collector, reporter);
+ if(incrProcCount) {
+ reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
+ SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS, 1);
+ }
+ values.nextKey();
+ values.informReduceProgress();
+ }
+
+ //Clean up: repeated in catch block below
+ reducer.close();
+ out.close(reporter);
+ //End of clean up.
+ } catch (IOException ioe) {
+ try {
+ reducer.close();
+ } catch (IOException ignored) {}
+
+ try {
+ out.close(reporter);
+ } catch (IOException ignored) {}
+
+ throw ioe;
+ }
+ }
+
+ static class OldTrackingRecordWriter<K, V> implements RecordWriter<K, V> {
+
+ private final RecordWriter<K, V> real;
+ private final org.apache.hadoop.mapred.Counters.Counter reduceOutputCounter;
+ private final org.apache.hadoop.mapred.Counters.Counter fileOutputByteCounter;
+ private final Statistics fsStats;
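+
+ // Bytes written are attributed to this task by sampling the FileSystem
+ // statistics before and after each call and incrementing the byte
+ // counter by the observed delta.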
+
+ @SuppressWarnings({ "deprecation", "unchecked" })
+ public OldTrackingRecordWriter(ReduceTask reduce, JobConf job,
+ TaskReporter reporter, String finalName) throws IOException {
+ this.reduceOutputCounter = reduce.reduceOutputCounter;
+ this.fileOutputByteCounter = reduce.fileOutputByteCounter;
+ Statistics matchedStats = null;
+ if (job.getOutputFormat() instanceof FileOutputFormat) {
+ matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
+ }
+ fsStats = matchedStats;
+
+ FileSystem fs = FileSystem.get(job);
+ long bytesOutPrev = getOutputBytes(fsStats);
+ this.real = job.getOutputFormat().getRecordWriter(fs, job, finalName,
+ reporter);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ }
+
+ @Override
+ public void write(K key, V value) throws IOException {
+ long bytesOutPrev = getOutputBytes(fsStats);
+ real.write(key, value);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ reduceOutputCounter.increment(1);
+ }
+
+ @Override
+ public void close(Reporter reporter) throws IOException {
+ long bytesOutPrev = getOutputBytes(fsStats);
+ real.close(reporter);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ }
+
+ private long getOutputBytes(Statistics stats) {
+ return stats == null ? 0 : stats.getBytesWritten();
+ }
+ }
+
+ static class NewTrackingRecordWriter<K,V>
+ extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
+ private final org.apache.hadoop.mapreduce.RecordWriter<K,V> real;
+ private final org.apache.hadoop.mapreduce.Counter outputRecordCounter;
+ private final org.apache.hadoop.mapreduce.Counter fileOutputByteCounter;
+ private final Statistics fsStats;
+
+ @SuppressWarnings("unchecked")
+ NewTrackingRecordWriter(ReduceTask reduce,
+ org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
+ throws InterruptedException, IOException {
+ this.outputRecordCounter = reduce.reduceOutputCounter;
+ this.fileOutputByteCounter = reduce.fileOutputByteCounter;
+
+ Statistics matchedStats = null;
+ if (reduce.outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
+ matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
+ .getOutputPath(taskContext), taskContext.getConfiguration());
+ }
+
+ fsStats = matchedStats;
+
+ long bytesOutPrev = getOutputBytes(fsStats);
+ this.real = (org.apache.hadoop.mapreduce.RecordWriter<K, V>) reduce.outputFormat
+ .getRecordWriter(taskContext);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ }
+
+ @Override
+ public void close(TaskAttemptContext context) throws IOException,
+ InterruptedException {
+ long bytesOutPrev = getOutputBytes(fsStats);
+ real.close(context);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ }
+
+ @Override
+ public void write(K key, V value) throws IOException, InterruptedException {
+ long bytesOutPrev = getOutputBytes(fsStats);
+ real.write(key,value);
+ long bytesOutCurr = getOutputBytes(fsStats);
+ fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
+ outputRecordCounter.increment(1);
+ }
+
+ private long getOutputBytes(Statistics stats) {
+ return stats == null ? 0 : stats.getBytesWritten();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private <INKEY,INVALUE,OUTKEY,OUTVALUE>
+ void runNewReducer(JobConf job,
+ final TaskUmbilicalProtocol umbilical,
+ final TaskReporter reporter,
+ RawKeyValueIterator rIter,
+ RawComparator<INKEY> comparator,
+ Class<INKEY> keyClass,
+ Class<INVALUE> valueClass
+ ) throws IOException,InterruptedException,
+ ClassNotFoundException {
+ // wrap value iterator to report progress.
+ final RawKeyValueIterator rawIter = rIter;
+ rIter = new RawKeyValueIterator() {
+ public void close() throws IOException {
+ rawIter.close();
+ }
+ public DataInputBuffer getKey() throws IOException {
+ return rawIter.getKey();
+ }
+ public Progress getProgress() {
+ return rawIter.getProgress();
+ }
+ public DataInputBuffer getValue() throws IOException {
+ return rawIter.getValue();
+ }
+ public boolean next() throws IOException {
+ boolean ret = rawIter.next();
+ reporter.setProgress(rawIter.getProgress().getProgress());
+ return ret;
+ }
+ };
+ // make a task context so we can get the classes
+ org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
+ new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job,
+ getTaskID(), reporter);
+ // make a reducer
+ org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer =
+ (org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>)
+ ReflectionUtils.newInstance(taskContext.getReducerClass(), job);
+ org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> trackedRW =
+ new NewTrackingRecordWriter<OUTKEY, OUTVALUE>(this, taskContext);
+ job.setBoolean("mapred.skip.on", isSkipping());
+ job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
+ org.apache.hadoop.mapreduce.Reducer.Context
+ reducerContext = createReduceContext(reducer, job, getTaskID(),
+ rIter, reduceInputKeyCounter,
+ reduceInputValueCounter,
+ trackedRW,
+ committer,
+ reporter, comparator, keyClass,
+ valueClass);
+ reducer.run(reducerContext);
+ trackedRW.close(reducerContext);
+ }
+}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ReduceTaskStatus.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTaskStatus.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/ReduceTaskStatus.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTaskStatus.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Reducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Reducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Reporter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reporter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Reporter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reporter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/RunningJob.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RunningJob.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/RunningJob.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RunningJob.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileAsBinaryInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileAsBinaryInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileInputFilter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileInputFilter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileInputFilter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileInputFilter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SequenceFileRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SkipBadRecords.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SkipBadRecords.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SkipBadRecords.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SkipBadRecords.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SortedRanges.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SortedRanges.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/SortedRanges.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SortedRanges.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SpillRecord.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SpillRecord.java
new file mode 100644
index 0000000..883b9ed
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SpillRecord.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.LongBuffer;
+import java.util.zip.CheckedInputStream;
+import java.util.zip.CheckedOutputStream;
+import java.util.zip.Checksum;
+
+import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.SecureIOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.PureJavaCrc32;
+import org.apache.hadoop.fs.FSDataInputStream;
+import static org.apache.hadoop.mapred.MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH;
+
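+/**
+ * An index of spill data, one record per partition. Each index record is
+ * stored as three consecutive longs: start offset, raw length, and part
+ * (on-disk) length. The arithmetic below assumes that
+ * MAP_OUTPUT_INDEX_RECORD_LENGTH is the byte length of those three longs.
+ */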
+class SpillRecord {
+
+ /** Backing store */
+ private final ByteBuffer buf;
+ /** View of backing storage as longs */
+ private final LongBuffer entries;
+
+ public SpillRecord(int numPartitions) {
+ buf = ByteBuffer.allocate(
+ numPartitions * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH);
+ entries = buf.asLongBuffer();
+ }
+
+ public SpillRecord(Path indexFileName, JobConf job) throws IOException {
+ this(indexFileName, job, null);
+ }
+
+ public SpillRecord(Path indexFileName, JobConf job, String expectedIndexOwner)
+ throws IOException {
+ this(indexFileName, job, new PureJavaCrc32(), expectedIndexOwner);
+ }
+
+ public SpillRecord(Path indexFileName, JobConf job, Checksum crc,
+ String expectedIndexOwner)
+ throws IOException {
+
+ final FileSystem rfs = FileSystem.getLocal(job).getRaw();
+ final FSDataInputStream in = rfs.open(indexFileName);
+ try {
+ final long length = rfs.getFileStatus(indexFileName).getLen();
+ final int partitions = (int) length / MAP_OUTPUT_INDEX_RECORD_LENGTH;
+ final int size = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
+
+ buf = ByteBuffer.allocate(size);
+ if (crc != null) {
+ crc.reset();
+ CheckedInputStream chk = new CheckedInputStream(in, crc);
+ IOUtils.readFully(chk, buf.array(), 0, size);
+ if (chk.getChecksum().getValue() != in.readLong()) {
+ throw new ChecksumException("Checksum error reading spill index: " +
+ indexFileName, -1);
+ }
+ } else {
+ IOUtils.readFully(in, buf.array(), 0, size);
+ }
+ entries = buf.asLongBuffer();
+ } finally {
+ in.close();
+ }
+ }
+
+ /**
+ * Return number of IndexRecord entries in this spill.
+ */
+ public int size() {
+ return entries.capacity() / (MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8);
+ }
+
+ /**
+ * Get spill offsets for given partition.
+ */
+ public IndexRecord getIndex(int partition) {
+ final int pos = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8;
+ return new IndexRecord(entries.get(pos), entries.get(pos + 1),
+ entries.get(pos + 2));
+ }
+
+ /**
+ * Set spill offsets for given partition.
+ */
+ public void putIndex(IndexRecord rec, int partition) {
+ final int pos = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8;
+ entries.put(pos, rec.startOffset);
+ entries.put(pos + 1, rec.rawLength);
+ entries.put(pos + 2, rec.partLength);
+ }
+
+ /**
+ * Write this spill record to the location provided.
+ */
+ public void writeToFile(Path loc, JobConf job)
+ throws IOException {
+ writeToFile(loc, job, new PureJavaCrc32());
+ }
+
+ public void writeToFile(Path loc, JobConf job, Checksum crc)
+ throws IOException {
+ final FileSystem rfs = FileSystem.getLocal(job).getRaw();
+ CheckedOutputStream chk = null;
+ final FSDataOutputStream out = rfs.create(loc);
+ try {
+ if (crc != null) {
+ crc.reset();
+ chk = new CheckedOutputStream(out, crc);
+ chk.write(buf.array());
+ out.writeLong(chk.getChecksum().getValue());
+ } else {
+ out.write(buf.array());
+ }
+ } finally {
+ if (chk != null) {
+ chk.close();
+ } else {
+ out.close();
+ }
+ }
+ }
+
+}
+
+class IndexRecord {
+ long startOffset;
+ long rawLength;
+ long partLength;
+
+ public IndexRecord() { }
+
+ public IndexRecord(long startOffset, long rawLength, long partLength) {
+ this.startOffset = startOffset;
+ this.rawLength = rawLength;
+ this.partLength = partLength;
+ }
+}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/StatePeriodicStats.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/StatePeriodicStats.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/StatePeriodicStats.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/StatePeriodicStats.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/StatisticsCollector.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/StatisticsCollector.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/StatisticsCollector.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/StatisticsCollector.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TIPStatus.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TIPStatus.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
new file mode 100644
index 0000000..60b711b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -0,0 +1,1548 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
+import java.text.NumberFormat;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import javax.crypto.SecretKey;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.serializer.Deserializer;
+import org.apache.hadoop.io.serializer.SerializationFactory;
+import org.apache.hadoop.mapred.IFile.Writer;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.FileSystemCounter;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TaskCounter;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
+import org.apache.hadoop.mapreduce.task.ReduceContextImpl;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin.*;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.Progress;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Base class for tasks.
+ */
+@InterfaceAudience.LimitedPrivate({"MapReduce"})
+@InterfaceStability.Unstable
+abstract public class Task implements Writable, Configurable {
+ private static final Log LOG =
+ LogFactory.getLog(Task.class);
+
+ public static String MERGED_OUTPUT_PREFIX = ".merged";
+ public static final long DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS = 10000;
+
+ /**
+ * Counters to measure the usage of the different file systems.
+ * Always returns a String array with two elements: the first is the name of
+ * the BYTES_READ counter and the second the name of the BYTES_WRITTEN counter.
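+ * For example, {@code getFileSystemCounterNames("hdfs")} returns
+ * {"HDFS_BYTES_READ", "HDFS_BYTES_WRITTEN"}.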
+ */
+ protected static String[] getFileSystemCounterNames(String uriScheme) {
+ String scheme = uriScheme.toUpperCase();
+ return new String[]{scheme+"_BYTES_READ", scheme+"_BYTES_WRITTEN"};
+ }
+
+ /**
+ * Name of the FileSystem counters' group
+ */
+ protected static final String FILESYSTEM_COUNTER_GROUP = "FileSystemCounters";
+
+ ///////////////////////////////////////////////////////////
+ // Helper methods to construct task-output paths
+ ///////////////////////////////////////////////////////////
+
+ /** Construct output file names so that, when an output directory listing is
+ * sorted lexicographically, positions correspond to output partitions.*/
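+ // e.g., getOutputName(0) yields "part-00000" given the five-digit,
+ // no-grouping format configured below.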
+ private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
+ static {
+ NUMBER_FORMAT.setMinimumIntegerDigits(5);
+ NUMBER_FORMAT.setGroupingUsed(false);
+ }
+
+ static synchronized String getOutputName(int partition) {
+ return "part-" + NUMBER_FORMAT.format(partition);
+ }
+
+ ////////////////////////////////////////////
+ // Fields
+ ////////////////////////////////////////////
+
+ private String jobFile; // job configuration file
+ private String user; // user running the job
+ private TaskAttemptID taskId; // unique, includes job id
+ private int partition; // id within job
+ TaskStatus taskStatus; // current status of the task
+ protected JobStatus.State jobRunStateForCleanup;
+ protected boolean jobCleanup = false;
+ protected boolean jobSetup = false;
+ protected boolean taskCleanup = false;
+
+ // An opaque data field used to attach extra data to each task. This is used
+ // by the Hadoop scheduler for Mesos to associate a Mesos task ID with each
+ // task and recover these IDs on the TaskTracker.
+ protected BytesWritable extraData = new BytesWritable();
+
+ //skip ranges based on failed ranges from previous attempts
+ private SortedRanges skipRanges = new SortedRanges();
+ private boolean skipping = false;
+ private boolean writeSkipRecs = true;
+
+ //currently processing record start index
+ private volatile long currentRecStartIndex;
+ private Iterator<Long> currentRecIndexIterator =
+ skipRanges.skipRangeIterator();
+
+ private ResourceCalculatorPlugin resourceCalculator = null;
+ private long initCpuCumulativeTime = 0;
+
+ protected JobConf conf;
+ protected MapOutputFile mapOutputFile;
+ protected LocalDirAllocator lDirAlloc;
+ private final static int MAX_RETRIES = 10;
+ protected JobContext jobContext;
+ protected TaskAttemptContext taskContext;
+ protected org.apache.hadoop.mapreduce.OutputFormat<?,?> outputFormat;
+ protected org.apache.hadoop.mapreduce.OutputCommitter committer;
+ protected final Counters.Counter spilledRecordsCounter;
+ protected final Counters.Counter failedShuffleCounter;
+ protected final Counters.Counter mergedMapOutputsCounter;
+ private int numSlotsRequired;
+ protected TaskUmbilicalProtocol umbilical;
+ protected SecretKey tokenSecret;
+ protected GcTimeUpdater gcUpdater;
+
+ ////////////////////////////////////////////
+ // Constructors
+ ////////////////////////////////////////////
+
+ public Task() {
+ taskStatus = TaskStatus.createTaskStatus(isMapTask());
+ taskId = new TaskAttemptID();
+ spilledRecordsCounter =
+ counters.findCounter(TaskCounter.SPILLED_RECORDS);
+ failedShuffleCounter =
+ counters.findCounter(TaskCounter.FAILED_SHUFFLE);
+ mergedMapOutputsCounter =
+ counters.findCounter(TaskCounter.MERGED_MAP_OUTPUTS);
+ gcUpdater = new GcTimeUpdater();
+ }
+
+ public Task(String jobFile, TaskAttemptID taskId, int partition,
+ int numSlotsRequired) {
+ this.jobFile = jobFile;
+ this.taskId = taskId;
+
+ this.partition = partition;
+ this.numSlotsRequired = numSlotsRequired;
+ this.taskStatus = TaskStatus.createTaskStatus(isMapTask(), this.taskId,
+ 0.0f, numSlotsRequired,
+ TaskStatus.State.UNASSIGNED,
+ "", "", "",
+ isMapTask() ?
+ TaskStatus.Phase.MAP :
+ TaskStatus.Phase.SHUFFLE,
+ counters);
+ spilledRecordsCounter = counters.findCounter(TaskCounter.SPILLED_RECORDS);
+ failedShuffleCounter = counters.findCounter(TaskCounter.FAILED_SHUFFLE);
+ mergedMapOutputsCounter =
+ counters.findCounter(TaskCounter.MERGED_MAP_OUTPUTS);
+ gcUpdater = new GcTimeUpdater();
+ }
+
+ ////////////////////////////////////////////
+ // Accessors
+ ////////////////////////////////////////////
+ public void setJobFile(String jobFile) { this.jobFile = jobFile; }
+ public String getJobFile() { return jobFile; }
+ public TaskAttemptID getTaskID() { return taskId; }
+ public int getNumSlotsRequired() {
+ return numSlotsRequired;
+ }
+
+ Counters getCounters() { return counters; }
+
+ /**
+ * Get the job ID for this task.
+ * @return the job ID
+ */
+ public JobID getJobID() {
+ return taskId.getJobID();
+ }
+
+ /**
+ * Set the job token secret
+ * @param tokenSecret the secret
+ */
+ public void setJobTokenSecret(SecretKey tokenSecret) {
+ this.tokenSecret = tokenSecret;
+ }
+
+ /**
+ * Get the job token secret
+ * @return the token secret
+ */
+ public SecretKey getJobTokenSecret() {
+ return this.tokenSecret;
+ }
+
+ /**
+ * Get the index of this task within the job.
+ * @return the integer part of the task id
+ */
+ public int getPartition() {
+ return partition;
+ }
+ /**
+ * Return the current phase of the task.
+ * Needs to be synchronized, as the communication thread sends the phase every second.
+ * @return the current phase of the task
+ */
+ public synchronized TaskStatus.Phase getPhase(){
+ return this.taskStatus.getPhase();
+ }
+ /**
+ * Set current phase of the task.
+ * @param phase task phase
+ */
+ protected synchronized void setPhase(TaskStatus.Phase phase){
+ this.taskStatus.setPhase(phase);
+ }
+
+ /**
+ * Get whether to write skip records.
+ */
+ protected boolean toWriteSkipRecs() {
+ return writeSkipRecs;
+ }
+
+ /**
+ * Set whether to write skip records.
+ */
+ protected void setWriteSkipRecs(boolean writeSkipRecs) {
+ this.writeSkipRecs = writeSkipRecs;
+ }
+
+ /**
+ * Report a fatal error to the parent (task) tracker.
+ */
+ protected void reportFatalError(TaskAttemptID id, Throwable throwable,
+ String logMsg) {
+ LOG.fatal(logMsg);
+ Throwable tCause = throwable.getCause();
+ String cause = tCause == null
+ ? StringUtils.stringifyException(throwable)
+ : StringUtils.stringifyException(tCause);
+ try {
+ umbilical.fatalError(id, cause);
+ } catch (IOException ioe) {
+ LOG.fatal("Failed to contact the tasktracker", ioe);
+ System.exit(-1);
+ }
+ }
+
+ /**
+ * Gets a handle to the Statistics instance based on the scheme associated
+ * with path.
+ *
+ * @param path the path.
+ * @param conf the configuration to extract the scheme from if not part of
+ * the path.
+ * @return a Statistics instance, or null if none is found for the scheme.
+ */
+ protected static Statistics getFsStatistics(Path path, Configuration conf) throws IOException {
+ Statistics matchedStats = null;
+ path = path.getFileSystem(conf).makeQualified(path);
+ String scheme = path.toUri().getScheme();
+ for (Statistics stats : FileSystem.getAllStatistics()) {
+ if (stats.getScheme().equals(scheme)) {
+ matchedStats = stats;
+ break;
+ }
+ }
+ return matchedStats;
+ }
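+ // For example, for a path such as hdfs://namenode/user/foo the qualified
+ // URI scheme is "hdfs", so this returns the Statistics instance registered
+ // for that scheme, or null if no such file system has been used yet.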
+
+ /**
+ * Get skipRanges.
+ */
+ public SortedRanges getSkipRanges() {
+ return skipRanges;
+ }
+
+ /**
+ * Set skipRanges.
+ */
+ public void setSkipRanges(SortedRanges skipRanges) {
+ this.skipRanges = skipRanges;
+ }
+
+ /**
+ * Is Task in skipping mode.
+ */
+ public boolean isSkipping() {
+ return skipping;
+ }
+
+ /**
+ * Sets whether to run Task in skipping mode.
+ * @param skipping whether the task should run in skipping mode
+ */
+ public void setSkipping(boolean skipping) {
+ this.skipping = skipping;
+ }
+
+ /**
+ * Return the current state of the task.
+ * Needs to be synchronized, as the communication thread
+ * sends the state every second.
+ * @return the current run state of the task
+ */
+ synchronized TaskStatus.State getState(){
+ return this.taskStatus.getRunState();
+ }
+ /**
+ * Set current state of the task.
+ * @param state the run state to record for the task
+ */
+ synchronized void setState(TaskStatus.State state){
+ this.taskStatus.setRunState(state);
+ }
+
+ void setTaskCleanupTask() {
+ taskCleanup = true;
+ }
+
+ boolean isTaskCleanupTask() {
+ return taskCleanup;
+ }
+
+ boolean isJobCleanupTask() {
+ return jobCleanup;
+ }
+
+ boolean isJobAbortTask() {
+ // the task is an abort task if it's marked for cleanup and the final
+ // expected state is either failed or killed.
+ return isJobCleanupTask()
+ && (jobRunStateForCleanup == JobStatus.State.KILLED
+ || jobRunStateForCleanup == JobStatus.State.FAILED);
+ }
+
+ boolean isJobSetupTask() {
+ return jobSetup;
+ }
+
+ void setJobSetupTask() {
+ jobSetup = true;
+ }
+
+ void setJobCleanupTask() {
+ jobCleanup = true;
+ }
+
+ /**
+ * Sets the task to do job abort in the cleanup.
+ * @param status the final runstate of the job.
+ */
+ void setJobCleanupTaskState(JobStatus.State status) {
+ jobRunStateForCleanup = status;
+ }
+
+ boolean isMapOrReduce() {
+ return !jobSetup && !jobCleanup && !taskCleanup;
+ }
+
+ /**
+ * Get the name of the user running the job/task. The TaskTracker needs the
+ * task's user name even before its JobConf is localized, so we explicitly
+ * serialize the user name.
+ *
+ * @return user
+ */
+ String getUser() {
+ return user;
+ }
+
+ void setUser(String user) {
+ this.user = user;
+ }
+
+ ////////////////////////////////////////////
+ // Writable methods
+ ////////////////////////////////////////////
+
+ public void write(DataOutput out) throws IOException {
+ Text.writeString(out, jobFile);
+ taskId.write(out);
+ out.writeInt(partition);
+ out.writeInt(numSlotsRequired);
+ taskStatus.write(out);
+ skipRanges.write(out);
+ out.writeBoolean(skipping);
+ out.writeBoolean(jobCleanup);
+ if (jobCleanup) {
+ WritableUtils.writeEnum(out, jobRunStateForCleanup);
+ }
+ out.writeBoolean(jobSetup);
+ out.writeBoolean(writeSkipRecs);
+ out.writeBoolean(taskCleanup);
+ Text.writeString(out, user);
+ extraData.write(out);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ jobFile = Text.readString(in);
+ taskId = TaskAttemptID.read(in);
+ partition = in.readInt();
+ numSlotsRequired = in.readInt();
+ taskStatus.readFields(in);
+ skipRanges.readFields(in);
+ currentRecIndexIterator = skipRanges.skipRangeIterator();
+ currentRecStartIndex = currentRecIndexIterator.next();
+ skipping = in.readBoolean();
+ jobCleanup = in.readBoolean();
+ if (jobCleanup) {
+ jobRunStateForCleanup =
+ WritableUtils.readEnum(in, JobStatus.State.class);
+ }
+ jobSetup = in.readBoolean();
+ writeSkipRecs = in.readBoolean();
+ taskCleanup = in.readBoolean();
+ if (taskCleanup) {
+ setPhase(TaskStatus.Phase.CLEANUP);
+ }
+ user = Text.readString(in);
+ extraData.readFields(in);
+ }
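+ // Note that readFields() must consume fields in exactly the order that
+ // write() emits them (jobFile, taskId, partition, slots, status, skip
+ // ranges, flags, user, extraData), with jobRunStateForCleanup present only
+ // when the jobCleanup flag is set; any asymmetry corrupts deserialization.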
+
+ @Override
+ public String toString() { return taskId.toString(); }
+
+ /**
+ * Localize the given JobConf to be specific for this task.
+ */
+ public void localizeConfiguration(JobConf conf) throws IOException {
+ conf.set(JobContext.TASK_ID, taskId.getTaskID().toString());
+ conf.set(JobContext.TASK_ATTEMPT_ID, taskId.toString());
+ conf.setBoolean(JobContext.TASK_ISMAP, isMapTask());
+ conf.setInt(JobContext.TASK_PARTITION, partition);
+ conf.set(JobContext.ID, taskId.getJobID().toString());
+ }
+
+ /** Run this task as a part of the named job. This method is executed in the
+ * child process and is what invokes user-supplied map, reduce, etc. methods.
+ * @param umbilical for progress reports
+ */
+ public abstract void run(JobConf job, TaskUmbilicalProtocol umbilical)
+ throws IOException, ClassNotFoundException, InterruptedException;
+
+ /** The number of milliseconds between progress reports. */
+ public static final int PROGRESS_INTERVAL = 3000;
+
+ private transient Progress taskProgress = new Progress();
+
+ // Current counters
+ private transient Counters counters = new Counters();
+
+ /* flag to track whether task is done */
+ private AtomicBoolean taskDone = new AtomicBoolean(false);
+
+ public abstract boolean isMapTask();
+
+ public Progress getProgress() { return taskProgress; }
+
+ public void initialize(JobConf job, JobID id,
+ Reporter reporter,
+ boolean useNewApi) throws IOException,
+ ClassNotFoundException,
+ InterruptedException {
+ jobContext = new JobContextImpl(job, id, reporter);
+ taskContext = new TaskAttemptContextImpl(job, taskId, reporter);
+ if (getState() == TaskStatus.State.UNASSIGNED) {
+ setState(TaskStatus.State.RUNNING);
+ }
+ if (useNewApi) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("using new api for output committer");
+ }
+ outputFormat =
+ ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
+ committer = outputFormat.getOutputCommitter(taskContext);
+ } else {
+ committer = conf.getOutputCommitter();
+ }
+ Path outputPath = FileOutputFormat.getOutputPath(conf);
+ if (outputPath != null) {
+ if (committer instanceof FileOutputCommitter) {
+ FileOutputFormat.setWorkOutputPath(conf,
+ ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
+ } else {
+ FileOutputFormat.setWorkOutputPath(conf, outputPath);
+ }
+ }
+ committer.setupTask(taskContext);
+ Class<? extends ResourceCalculatorPlugin> clazz =
+ conf.getClass(MRConfig.RESOURCE_CALCULATOR_PLUGIN,
+ null, ResourceCalculatorPlugin.class);
+ resourceCalculator = ResourceCalculatorPlugin
+ .getResourceCalculatorPlugin(clazz, conf);
+ LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
+ if (resourceCalculator != null) {
+ initCpuCumulativeTime =
+ resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
+ }
+ }
+
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ protected class TaskReporter
+ extends org.apache.hadoop.mapreduce.StatusReporter
+ implements Runnable, Reporter {
+ private TaskUmbilicalProtocol umbilical;
+ private InputSplit split = null;
+ private Progress taskProgress;
+ private Thread pingThread = null;
+
+ /**
+ * flag that indicates whether progress update needs to be sent to parent.
+ * If true, it has been set. If false, it has been reset.
+ * Using AtomicBoolean since we need an atomic read & reset method.
+ */
+ private AtomicBoolean progressFlag = new AtomicBoolean(false);
+
+ TaskReporter(Progress taskProgress,
+ TaskUmbilicalProtocol umbilical) {
+ this.umbilical = umbilical;
+ this.taskProgress = taskProgress;
+ }
+
+ // getters and setters for flag
+ void setProgressFlag() {
+ progressFlag.set(true);
+ }
+ boolean resetProgressFlag() {
+ return progressFlag.getAndSet(false);
+ }
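+ // getAndSet(false) returns the previous value and clears the flag in a
+ // single atomic step, so a progress update that is reported between the
+ // read and the reset can never be lost.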
+ public void setStatus(String status) {
+ taskProgress.setStatus(status);
+ // indicate that progress update needs to be sent
+ setProgressFlag();
+ }
+ public void setProgress(float progress) {
+ // set current phase progress.
+ // This method assumes that task has phases.
+ taskProgress.phase().set(progress);
+ // indicate that progress update needs to be sent
+ setProgressFlag();
+ }
+
+ public float getProgress() {
+ return taskProgress.getProgress();
+ }
+
+ public void progress() {
+ // indicate that progress update needs to be sent
+ setProgressFlag();
+ }
+ public Counters.Counter getCounter(String group, String name) {
+ Counters.Counter counter = null;
+ if (counters != null) {
+ counter = counters.findCounter(group, name);
+ }
+ return counter;
+ }
+ public Counters.Counter getCounter(Enum<?> name) {
+ return counters == null ? null : counters.findCounter(name);
+ }
+ public void incrCounter(Enum key, long amount) {
+ if (counters != null) {
+ counters.incrCounter(key, amount);
+ }
+ setProgressFlag();
+ }
+ public void incrCounter(String group, String counter, long amount) {
+ if (counters != null) {
+ counters.incrCounter(group, counter, amount);
+ }
+ if (skipping && SkipBadRecords.COUNTER_GROUP.equals(group) && (
+ SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS.equals(counter) ||
+ SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS.equals(counter))) {
+ // If the application reports the processed records, advance
+ // currentRecStartIndex to the next record; currentRecStartIndex is the
+ // start index of the record that has not yet been finished and is still
+ // being processed by the task.
+ for (int i = 0; i < amount; i++) {
+ currentRecStartIndex = currentRecIndexIterator.next();
+ }
+ }
+ setProgressFlag();
+ }
+ public void setInputSplit(InputSplit split) {
+ this.split = split;
+ }
+ public InputSplit getInputSplit() throws UnsupportedOperationException {
+ if (split == null) {
+ throw new UnsupportedOperationException("Input only available on map");
+ } else {
+ return split;
+ }
+ }
+ /**
+ * The communication thread handles communication with the parent (Task Tracker).
+ * It sends progress updates if progress has been made or if the task needs to
+ * let the parent know that it's alive. It also pings the parent to see if it's alive.
+ */
+ public void run() {
+ final int MAX_RETRIES = 3;
+ int remainingRetries = MAX_RETRIES;
+ // get current flag value and reset it as well
+ boolean sendProgress = resetProgressFlag();
+ while (!taskDone.get()) {
+ try {
+ boolean taskFound = true; // whether TT knows about this task
+ // sleep for a bit
+ try {
+ Thread.sleep(PROGRESS_INTERVAL);
+ }
+ catch (InterruptedException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(getTaskID() + " Progress/ping thread exiting " +
+ "since it got interrupted");
+ }
+ break;
+ }
+
+ if (sendProgress) {
+ // we need to send progress update
+ updateCounters();
+ taskStatus.statusUpdate(taskProgress.get(),
+ taskProgress.toString(),
+ counters);
+ taskFound = umbilical.statusUpdate(taskId, taskStatus);
+ taskStatus.clearStatus();
+ }
+ else {
+ // send ping
+ taskFound = umbilical.ping(taskId);
+ }
+
+ // if Task Tracker is not aware of our task ID (probably because it died and
+ // came back up), kill ourselves
+ if (!taskFound) {
+ LOG.warn("Parent died. Exiting "+taskId);
+ System.exit(66);
+ }
+
+ sendProgress = resetProgressFlag();
+ remainingRetries = MAX_RETRIES;
+ }
+ catch (Throwable t) {
+ LOG.info("Communication exception: " + StringUtils.stringifyException(t));
+ remainingRetries -= 1;
+ if (remainingRetries == 0) {
+ ReflectionUtils.logThreadInfo(LOG, "Communication exception", 0);
+ LOG.warn("Last retry, killing "+taskId);
+ System.exit(65);
+ }
+ }
+ }
+ }
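+ // Note that each successful round trip above resets remainingRetries to
+ // MAX_RETRIES, so the task exits only after MAX_RETRIES (3) consecutive
+ // communication failures, not 3 failures over its whole lifetime.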
+ public void startCommunicationThread() {
+ if (pingThread == null) {
+ pingThread = new Thread(this, "communication thread");
+ pingThread.setDaemon(true);
+ pingThread.start();
+ }
+ }
+ public void stopCommunicationThread() throws InterruptedException {
+ if (pingThread != null) {
+ pingThread.interrupt();
+ pingThread.join();
+ }
+ }
+ }
+
+ /**
+ * Reports the next executing record range to TaskTracker.
+ *
+ * @param umbilical the umbilical protocol handle used to report the range
+ * @param nextRecIndex the record index which would be fed next.
+ * @throws IOException
+ */
+ protected void reportNextRecordRange(final TaskUmbilicalProtocol umbilical,
+ long nextRecIndex) throws IOException{
+ // currentRecStartIndex is the start index of the record that has not yet
+ // been finished and is still being processed by the task.
+ long len = nextRecIndex - currentRecStartIndex + 1;
+ SortedRanges.Range range =
+ new SortedRanges.Range(currentRecStartIndex, len);
+ taskStatus.setNextRecordRange(range);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("sending reportNextRecordRange " + range);
+ }
+ umbilical.reportNextRecordRange(taskId, range);
+ }
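+ // For example, with currentRecStartIndex = 5 and nextRecIndex = 7, the
+ // reported range is Range(5, 3), covering records 5, 6 and 7.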
+
+ /**
+ * Create a TaskReporter and start communication thread
+ */
+ TaskReporter startReporter(final TaskUmbilicalProtocol umbilical) {
+ // start thread that will handle communication with parent
+ TaskReporter reporter = new TaskReporter(getProgress(), umbilical);
+ reporter.startCommunicationThread();
+ return reporter;
+ }
+
+ /**
+ * Update resource information counters
+ */
+ void updateResourceCounters() {
+ // Update generic resource counters
+ updateHeapUsageCounter();
+
+ // Updating resources specified in ResourceCalculatorPlugin
+ if (resourceCalculator == null) {
+ return;
+ }
+ ProcResourceValues res = resourceCalculator.getProcResourceValues();
+ long cpuTime = res.getCumulativeCpuTime();
+ long pMem = res.getPhysicalMemorySize();
+ long vMem = res.getVirtualMemorySize();
+ // Remove the CPU time consumed previously by JVM reuse
+ cpuTime -= initCpuCumulativeTime;
+ counters.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(cpuTime);
+ counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES).setValue(pMem);
+ counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES).setValue(vMem);
+ }
+
+ /**
+ * An updater that tracks the amount of time this task has spent in GC.
+ */
+ class GcTimeUpdater {
+ private long lastGcMillis = 0;
+ private List<GarbageCollectorMXBean> gcBeans = null;
+
+ public GcTimeUpdater() {
+ this.gcBeans = ManagementFactory.getGarbageCollectorMXBeans();
+ getElapsedGc(); // Initialize 'lastGcMillis' with the current time spent.
+ }
+
+ /**
+ * @return the number of milliseconds that the gc has used for CPU
+ * since the last time this method was called.
+ */
+ protected long getElapsedGc() {
+ long thisGcMillis = 0;
+ for (GarbageCollectorMXBean gcBean : gcBeans) {
+ thisGcMillis += gcBean.getCollectionTime();
+ }
+
+ long delta = thisGcMillis - lastGcMillis;
+ this.lastGcMillis = thisGcMillis;
+ return delta;
+ }
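+ // For example, if the beans' total collection time was 150 ms at the
+ // previous call and is 230 ms now, getElapsedGc() returns 80.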
+
+ /**
+ * Increment the gc-elapsed-time counter.
+ */
+ public void incrementGcCounter() {
+ if (null == counters) {
+ return; // nothing to do.
+ }
+
+ Counter gcCounter = counters.findCounter(TaskCounter.GC_TIME_MILLIS);
+ if (null != gcCounter) {
+ gcCounter.increment(getElapsedGc());
+ }
+ }
+ }
+
+ /**
+ * An updater that tracks the last number reported for a given file
+ * system and only creates the counters when they are needed.
+ */
+ class FileSystemStatisticUpdater {
+ private FileSystem.Statistics stats;
+ private Counters.Counter readBytesCounter, writeBytesCounter,
+ readOpsCounter, largeReadOpsCounter, writeOpsCounter;
+
+ FileSystemStatisticUpdater(FileSystem.Statistics stats) {
+ this.stats = stats;
+ }
+
+ void updateCounters() {
+ String scheme = stats.getScheme();
+ if (readBytesCounter == null) {
+ readBytesCounter = counters.findCounter(scheme,
+ FileSystemCounter.BYTES_READ);
+ }
+ readBytesCounter.setValue(stats.getBytesRead());
+ if (writeBytesCounter == null) {
+ writeBytesCounter = counters.findCounter(scheme,
+ FileSystemCounter.BYTES_WRITTEN);
+ }
+ writeBytesCounter.setValue(stats.getBytesWritten());
+ if (readOpsCounter == null) {
+ readOpsCounter = counters.findCounter(scheme,
+ FileSystemCounter.READ_OPS);
+ }
+ readOpsCounter.setValue(stats.getReadOps());
+ if (largeReadOpsCounter == null) {
+ largeReadOpsCounter = counters.findCounter(scheme,
+ FileSystemCounter.LARGE_READ_OPS);
+ }
+ largeReadOpsCounter.setValue(stats.getLargeReadOps());
+ if (writeOpsCounter == null) {
+ writeOpsCounter = counters.findCounter(scheme,
+ FileSystemCounter.WRITE_OPS);
+ }
+ writeOpsCounter.setValue(stats.getWriteOps());
+ }
+ }
+
+ /**
+ * A map from URI scheme to the FileSystemStatisticUpdater for that scheme.
+ */
+ private Map<String, FileSystemStatisticUpdater> statisticUpdaters =
+ new HashMap<String, FileSystemStatisticUpdater>();
+
+ private synchronized void updateCounters() {
+ for (Statistics stat : FileSystem.getAllStatistics()) {
+ String uriScheme = stat.getScheme();
+ FileSystemStatisticUpdater updater = statisticUpdaters.get(uriScheme);
+ if (updater == null) { // new FileSystem has been found in the cache
+ updater = new FileSystemStatisticUpdater(stat);
+ statisticUpdaters.put(uriScheme, updater);
+ }
+ updater.updateCounters();
+ }
+
+ gcUpdater.incrementGcCounter();
+ updateResourceCounters();
+ }
+
+ /**
+ * Updates the {@link TaskCounter#COMMITTED_HEAP_BYTES} counter to reflect the
+ * current total committed heap space usage of this JVM.
+ */
+ @SuppressWarnings("deprecation")
+ private void updateHeapUsageCounter() {
+ long currentHeapUsage = Runtime.getRuntime().totalMemory();
+ counters.findCounter(TaskCounter.COMMITTED_HEAP_BYTES)
+ .setValue(currentHeapUsage);
+ }
+
+ public void done(TaskUmbilicalProtocol umbilical,
+ TaskReporter reporter
+ ) throws IOException, InterruptedException {
+ LOG.info("Task:" + taskId + " is done."
+ + " And is in the process of commiting");
+ updateCounters();
+
+ boolean commitRequired = isCommitRequired();
+ if (commitRequired) {
+ int retries = MAX_RETRIES;
+ setState(TaskStatus.State.COMMIT_PENDING);
+ // tell the task tracker that the task is commit pending
+ while (true) {
+ try {
+ umbilical.commitPending(taskId, taskStatus);
+ break;
+ } catch (InterruptedException ie) {
+ // ignore
+ } catch (IOException ie) {
+ LOG.warn("Failure sending commit pending: " +
+ StringUtils.stringifyException(ie));
+ if (--retries == 0) {
+ System.exit(67);
+ }
+ }
+ }
+ //wait for commit approval and commit
+ commit(umbilical, reporter, committer);
+ }
+ taskDone.set(true);
+ reporter.stopCommunicationThread();
+ // Make sure we send at least one set of counter increments. It's
+ // ok to call updateCounters() in this thread after the comm thread has stopped.
+ updateCounters();
+ sendLastUpdate(umbilical);
+ //signal the tasktracker that we are done
+ sendDone(umbilical);
+ }
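+ // The commit handshake is thus: commitPending() tells the parent a commit
+ // is pending, commit() polls canCommit() until approval and then invokes
+ // committer.commitTask(), and sendLastUpdate()/sendDone() report the final
+ // counters and completion.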
+
+ /**
+ * Checks if this task has anything to commit, depending on the
+ * type of task, as well as on whether the {@link OutputCommitter}
+ * has anything to commit.
+ *
+ * @return true if the task has to commit
+ * @throws IOException
+ */
+ boolean isCommitRequired() throws IOException {
+ boolean commitRequired = false;
+ if (isMapOrReduce()) {
+ commitRequired = committer.needsTaskCommit(taskContext);
+ }
+ return commitRequired;
+ }
+
+ /**
+ * Send a status update to the task tracker
+ * @param umbilical the channel used to send the update to the task tracker
+ * @throws IOException
+ */
+ public void statusUpdate(TaskUmbilicalProtocol umbilical)
+ throws IOException {
+ int retries = MAX_RETRIES;
+ while (true) {
+ try {
+ if (!umbilical.statusUpdate(getTaskID(), taskStatus)) {
+ LOG.warn("Parent died. Exiting "+taskId);
+ System.exit(66);
+ }
+ taskStatus.clearStatus();
+ return;
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt(); // interrupt ourself
+ } catch (IOException ie) {
+ LOG.warn("Failure sending status update: " +
+ StringUtils.stringifyException(ie));
+ if (--retries == 0) {
+ throw ie;
+ }
+ }
+ }
+ }
+
+ /**
+ * Sends the last status update before calling umbilical.done().
+ */
+ private void sendLastUpdate(TaskUmbilicalProtocol umbilical)
+ throws IOException {
+ taskStatus.setOutputSize(calculateOutputSize());
+ // send a final status report
+ taskStatus.statusUpdate(taskProgress.get(),
+ taskProgress.toString(),
+ counters);
+ statusUpdate(umbilical);
+ }
+
+ /**
+ * Calculates the size of the output for this task.
+ *
+ * @return the output size in bytes, or -1 if it cannot be determined.
+ */
+ private long calculateOutputSize() throws IOException {
+ if (!isMapOrReduce()) {
+ return -1;
+ }
+
+ if (isMapTask() && conf.getNumReduceTasks() > 0) {
+ try {
+ Path mapOutput = mapOutputFile.getOutputFile();
+ FileSystem localFS = FileSystem.getLocal(conf);
+ return localFS.getFileStatus(mapOutput).getLen();
+ } catch (IOException e) {
+ LOG.warn ("Could not find output size " , e);
+ }
+ }
+ return -1;
+ }
+
+ private void sendDone(TaskUmbilicalProtocol umbilical) throws IOException {
+ int retries = MAX_RETRIES;
+ while (true) {
+ try {
+ umbilical.done(getTaskID());
+ LOG.info("Task '" + taskId + "' done.");
+ return;
+ } catch (IOException ie) {
+ LOG.warn("Failure signalling completion: " +
+ StringUtils.stringifyException(ie));
+ if (--retries == 0) {
+ throw ie;
+ }
+ }
+ }
+ }
+
+ private void commit(TaskUmbilicalProtocol umbilical,
+ TaskReporter reporter,
+ org.apache.hadoop.mapreduce.OutputCommitter committer
+ ) throws IOException {
+ int retries = MAX_RETRIES;
+ while (true) {
+ try {
+ while (!umbilical.canCommit(taskId)) {
+ try {
+ Thread.sleep(1000);
+ } catch(InterruptedException ie) {
+ //ignore
+ }
+ reporter.setProgressFlag();
+ }
+ break;
+ } catch (IOException ie) {
+ LOG.warn("Failure asking whether task can commit: " +
+ StringUtils.stringifyException(ie));
+ if (--retries == 0) {
+ // if the query could not complete successfully, delete the output
+ discardOutput(taskContext);
+ System.exit(68);
+ }
+ }
+ }
+
+ // task can commit now
+ try {
+ LOG.info("Task " + taskId + " is allowed to commit now");
+ committer.commitTask(taskContext);
+ return;
+ } catch (IOException iee) {
+ LOG.warn("Failure committing: " +
+ StringUtils.stringifyException(iee));
+ // if the commit did not succeed, delete the output
+ discardOutput(taskContext);
+ throw iee;
+ }
+ }
+
+ private void discardOutput(TaskAttemptContext taskContext) {
+ try {
+ committer.abortTask(taskContext);
+ } catch (IOException ioe) {
+ LOG.warn("Failure cleaning up: " +
+ StringUtils.stringifyException(ioe));
+ }
+ }
+
+ protected void runTaskCleanupTask(TaskUmbilicalProtocol umbilical,
+ TaskReporter reporter)
+ throws IOException, InterruptedException {
+ taskCleanup(umbilical);
+ done(umbilical, reporter);
+ }
+
+ void taskCleanup(TaskUmbilicalProtocol umbilical)
+ throws IOException {
+ // set phase for this task
+ setPhase(TaskStatus.Phase.CLEANUP);
+ getProgress().setStatus("cleanup");
+ statusUpdate(umbilical);
+ LOG.info("Runnning cleanup for the task");
+ // do the cleanup
+ committer.abortTask(taskContext);
+ }
+
+ protected void runJobCleanupTask(TaskUmbilicalProtocol umbilical,
+ TaskReporter reporter
+ ) throws IOException, InterruptedException {
+ // set phase for this task
+ setPhase(TaskStatus.Phase.CLEANUP);
+ getProgress().setStatus("cleanup");
+ statusUpdate(umbilical);
+ // do the cleanup
+ LOG.info("Cleaning up job");
+ if (jobRunStateForCleanup == JobStatus.State.FAILED
+ || jobRunStateForCleanup == JobStatus.State.KILLED) {
+ LOG.info("Aborting job with runstate : " + jobRunStateForCleanup.name());
+ if (conf.getUseNewMapper()) {
+ committer.abortJob(jobContext, jobRunStateForCleanup);
+ } else {
+ org.apache.hadoop.mapred.OutputCommitter oldCommitter =
+ (org.apache.hadoop.mapred.OutputCommitter)committer;
+ oldCommitter.abortJob(jobContext, jobRunStateForCleanup);
+ }
+ } else if (jobRunStateForCleanup == JobStatus.State.SUCCEEDED){
+ LOG.info("Committing job");
+ committer.commitJob(jobContext);
+ } else {
+ throw new IOException("Invalid state of the job for cleanup. State found "
+ + jobRunStateForCleanup + " expecting "
+ + JobStatus.State.SUCCEEDED + ", "
+ + JobStatus.State.FAILED + " or "
+ + JobStatus.State.KILLED);
+ }
+
+ // delete the staging area for the job
+ JobConf conf = new JobConf(jobContext.getConfiguration());
+ if (!keepTaskFiles(conf)) {
+ String jobTempDir = conf.get("mapreduce.job.dir");
+ Path jobTempDirPath = new Path(jobTempDir);
+ FileSystem fs = jobTempDirPath.getFileSystem(conf);
+ fs.delete(jobTempDirPath, true);
+ }
+ done(umbilical, reporter);
+ }
+
+ protected boolean keepTaskFiles(JobConf conf) {
+ return (conf.getKeepTaskFilesPattern() != null || conf
+ .getKeepFailedTaskFiles());
+ }
+
+ protected void runJobSetupTask(TaskUmbilicalProtocol umbilical,
+ TaskReporter reporter
+ ) throws IOException, InterruptedException {
+ // do the setup
+ getProgress().setStatus("setup");
+ committer.setupJob(jobContext);
+ done(umbilical, reporter);
+ }
+
+ public void setConf(Configuration conf) {
+ if (conf instanceof JobConf) {
+ this.conf = (JobConf) conf;
+ } else {
+ this.conf = new JobConf(conf);
+ }
+ this.mapOutputFile = ReflectionUtils.newInstance(
+ conf.getClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
+ MROutputFiles.class, MapOutputFile.class), conf);
+ this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
+ // Add the static resolutions (this is required for JUnit tests that
+ // simulate multiple nodes on a single physical node).
+ String[] hostToResolved = conf.getStrings(MRConfig.STATIC_RESOLUTIONS);
+ if (hostToResolved != null) {
+ for (String str : hostToResolved) {
+ String name = str.substring(0, str.indexOf('='));
+ String resolvedName = str.substring(str.indexOf('=') + 1);
+ NetUtils.addStaticResolution(name, resolvedName);
+ }
+ }
+ }
+
+ public Configuration getConf() {
+ return this.conf;
+ }
+
+ public MapOutputFile getMapOutputFile() {
+ return mapOutputFile;
+ }
+
+ /**
+ * OutputCollector for the combiner.
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public static class CombineOutputCollector<K extends Object, V extends Object>
+ implements OutputCollector<K, V> {
+ private Writer<K, V> writer;
+ private Counters.Counter outCounter;
+ private Progressable progressable;
+ private long progressBar;
+
+ public CombineOutputCollector(Counters.Counter outCounter, Progressable progressable, Configuration conf) {
+ this.outCounter = outCounter;
+ this.progressable=progressable;
+ progressBar = conf.getLong(MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS, DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS);
+ }
+
+ public synchronized void setWriter(Writer<K, V> writer) {
+ this.writer = writer;
+ }
+
+ public synchronized void collect(K key, V value)
+ throws IOException {
+ outCounter.increment(1);
+ writer.append(key, value);
+ if ((outCounter.getValue() % progressBar) == 0) {
+ progressable.progress();
+ }
+ }
+ }
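+ // With the default DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS of 10000 (used
+ // unless MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS overrides it),
+ // collect() calls progressable.progress() on every 10000th record, which
+ // keeps a long-running combine from appearing unresponsive.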
+
+ /** Iterates values while keys match in sorted input. */
+ static class ValuesIterator<KEY,VALUE> implements Iterator<VALUE> {
+ protected RawKeyValueIterator in; //input iterator
+ private KEY key; // current key
+ private KEY nextKey;
+ private VALUE value; // current value
+ private boolean hasNext; // more w/ this key
+ private boolean more; // more in file
+ private RawComparator<KEY> comparator;
+ protected Progressable reporter;
+ private Deserializer<KEY> keyDeserializer;
+ private Deserializer<VALUE> valDeserializer;
+ private DataInputBuffer keyIn = new DataInputBuffer();
+ private DataInputBuffer valueIn = new DataInputBuffer();
+
+ public ValuesIterator (RawKeyValueIterator in,
+ RawComparator<KEY> comparator,
+ Class<KEY> keyClass,
+ Class<VALUE> valClass, Configuration conf,
+ Progressable reporter)
+ throws IOException {
+ this.in = in;
+ this.comparator = comparator;
+ this.reporter = reporter;
+ SerializationFactory serializationFactory = new SerializationFactory(conf);
+ this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
+ this.keyDeserializer.open(keyIn);
+ this.valDeserializer = serializationFactory.getDeserializer(valClass);
+ this.valDeserializer.open(this.valueIn);
+ readNextKey();
+ key = nextKey;
+ nextKey = null; // force new instance creation
+ hasNext = more;
+ }
+
+ RawKeyValueIterator getRawIterator() { return in; }
+
+ /// Iterator methods
+
+ public boolean hasNext() { return hasNext; }
+
+ private int ctr = 0;
+ public VALUE next() {
+ if (!hasNext) {
+ throw new NoSuchElementException("iterate past last value");
+ }
+ try {
+ readNextValue();
+ readNextKey();
+ } catch (IOException ie) {
+ throw new RuntimeException("problem advancing post rec#" + ctr, ie);
+ }
+ reporter.progress();
+ return value;
+ }
+
+ public void remove() { throw new RuntimeException("not implemented"); }
+
+ /// Auxiliary methods
+
+ /** Start processing next unique key. */
+ public void nextKey() throws IOException {
+ // read until we find a new key
+ while (hasNext) {
+ readNextKey();
+ }
+ ++ctr;
+
+ // move the next key to the current one
+ KEY tmpKey = key;
+ key = nextKey;
+ nextKey = tmpKey;
+ hasNext = more;
+ }
+
+ /** True iff more keys remain. */
+ public boolean more() {
+ return more;
+ }
+
+ /** The current key. */
+ public KEY getKey() {
+ return key;
+ }
+
+ /**
+ * Read the next key.
+ */
+ private void readNextKey() throws IOException {
+ more = in.next();
+ if (more) {
+ DataInputBuffer nextKeyBytes = in.getKey();
+ keyIn.reset(nextKeyBytes.getData(), nextKeyBytes.getPosition(), nextKeyBytes.getLength());
+ nextKey = keyDeserializer.deserialize(nextKey);
+ hasNext = key != null && (comparator.compare(key, nextKey) == 0);
+ } else {
+ hasNext = false;
+ }
+ }
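+ // hasNext stays true only while the newly read key compares equal to the
+ // current one; e.g. for sorted keys [a, a, b], the iterator for "a" yields
+ // two values and hasNext becomes false once "b" is read ahead.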
+
+ /**
+ * Read the next value
+ * @throws IOException
+ */
+ private void readNextValue() throws IOException {
+ DataInputBuffer nextValueBytes = in.getValue();
+ valueIn.reset(nextValueBytes.getData(), nextValueBytes.getPosition(), nextValueBytes.getLength());
+ value = valDeserializer.deserialize(value);
+ }
+ }
+
+ /** Iterator to return Combined values */
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public static class CombineValuesIterator<KEY,VALUE>
+ extends ValuesIterator<KEY,VALUE> {
+
+ private final Counters.Counter combineInputCounter;
+
+ public CombineValuesIterator(RawKeyValueIterator in,
+ RawComparator<KEY> comparator, Class<KEY> keyClass,
+ Class<VALUE> valClass, Configuration conf, Reporter reporter,
+ Counters.Counter combineInputCounter) throws IOException {
+ super(in, comparator, keyClass, valClass, conf, reporter);
+ this.combineInputCounter = combineInputCounter;
+ }
+
+ public VALUE next() {
+ combineInputCounter.increment(1);
+ return super.next();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ protected static <INKEY,INVALUE,OUTKEY,OUTVALUE>
+ org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
+ createReduceContext(org.apache.hadoop.mapreduce.Reducer
+ <INKEY,INVALUE,OUTKEY,OUTVALUE> reducer,
+ Configuration job,
+ org.apache.hadoop.mapreduce.TaskAttemptID taskId,
+ RawKeyValueIterator rIter,
+ org.apache.hadoop.mapreduce.Counter inputKeyCounter,
+ org.apache.hadoop.mapreduce.Counter inputValueCounter,
+ org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output,
+ org.apache.hadoop.mapreduce.OutputCommitter committer,
+ org.apache.hadoop.mapreduce.StatusReporter reporter,
+ RawComparator<INKEY> comparator,
+ Class<INKEY> keyClass, Class<INVALUE> valueClass
+ ) throws IOException, InterruptedException {
+ org.apache.hadoop.mapreduce.ReduceContext<INKEY, INVALUE, OUTKEY, OUTVALUE>
+ reduceContext =
+ new ReduceContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(job, taskId,
+ rIter,
+ inputKeyCounter,
+ inputValueCounter,
+ output,
+ committer,
+ reporter,
+ comparator,
+ keyClass,
+ valueClass);
+
+ org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
+ reducerContext =
+ new WrappedReducer<INKEY, INVALUE, OUTKEY, OUTVALUE>().getReducerContext(
+ reduceContext);
+
+ return reducerContext;
+ }
+
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ protected static abstract class CombinerRunner<K,V> {
+ protected final Counters.Counter inputCounter;
+ protected final JobConf job;
+ protected final TaskReporter reporter;
+
+ CombinerRunner(Counters.Counter inputCounter,
+ JobConf job,
+ TaskReporter reporter) {
+ this.inputCounter = inputCounter;
+ this.job = job;
+ this.reporter = reporter;
+ }
+
+ /**
+ * Run the combiner over a set of inputs.
+ * @param iterator the key/value pairs to use as input
+ * @param collector the output collector
+ */
+ abstract void combine(RawKeyValueIterator iterator,
+ OutputCollector<K,V> collector
+ ) throws IOException, InterruptedException,
+ ClassNotFoundException;
+
+ @SuppressWarnings("unchecked")
+ static <K,V>
+ CombinerRunner<K,V> create(JobConf job,
+ TaskAttemptID taskId,
+ Counters.Counter inputCounter,
+ TaskReporter reporter,
+ org.apache.hadoop.mapreduce.OutputCommitter committer
+ ) throws ClassNotFoundException {
+ Class<? extends Reducer<K,V,K,V>> cls =
+ (Class<? extends Reducer<K,V,K,V>>) job.getCombinerClass();
+
+ if (cls != null) {
+ return new OldCombinerRunner(cls, job, inputCounter, reporter);
+ }
+ // make a task context so we can get the classes
+ org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
+ new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job, taskId,
+ reporter);
+ Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>> newcls =
+ (Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>)
+ taskContext.getCombinerClass();
+ if (newcls != null) {
+ return new NewCombinerRunner<K,V>(newcls, job, taskId, taskContext,
+ inputCounter, reporter, committer);
+ }
+
+ return null;
+ }
+ }
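+ // Selection order in create(): an old-API combiner configured via
+ // JobConf.getCombinerClass() wins; otherwise the new-API combiner from the
+ // task context is used; if neither is configured, create() returns null
+ // and no combiner runs.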
+
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ protected static class OldCombinerRunner<K,V> extends CombinerRunner<K,V> {
+ private final Class<? extends Reducer<K,V,K,V>> combinerClass;
+ private final Class<K> keyClass;
+ private final Class<V> valueClass;
+ private final RawComparator<K> comparator;
+
+ @SuppressWarnings("unchecked")
+ protected OldCombinerRunner(Class<? extends Reducer<K,V,K,V>> cls,
+ JobConf conf,
+ Counters.Counter inputCounter,
+ TaskReporter reporter) {
+ super(inputCounter, conf, reporter);
+ combinerClass = cls;
+ keyClass = (Class<K>) job.getMapOutputKeyClass();
+ valueClass = (Class<V>) job.getMapOutputValueClass();
+ comparator = (RawComparator<K>) job.getOutputKeyComparator();
+ }
+
+ @SuppressWarnings("unchecked")
+ protected void combine(RawKeyValueIterator kvIter,
+ OutputCollector<K,V> combineCollector
+ ) throws IOException {
+ Reducer<K,V,K,V> combiner =
+ ReflectionUtils.newInstance(combinerClass, job);
+ try {
+ CombineValuesIterator<K,V> values =
+ new CombineValuesIterator<K,V>(kvIter, comparator, keyClass,
+ valueClass, job, Reporter.NULL,
+ inputCounter);
+ while (values.more()) {
+ combiner.reduce(values.getKey(), values, combineCollector,
+ Reporter.NULL);
+ values.nextKey();
+ }
+ } finally {
+ combiner.close();
+ }
+ }
+ }
+
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ protected static class NewCombinerRunner<K, V> extends CombinerRunner<K,V> {
+ private final Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>
+ reducerClass;
+ private final org.apache.hadoop.mapreduce.TaskAttemptID taskId;
+ private final RawComparator<K> comparator;
+ private final Class<K> keyClass;
+ private final Class<V> valueClass;
+ private final org.apache.hadoop.mapreduce.OutputCommitter committer;
+
+ @SuppressWarnings("unchecked")
+ NewCombinerRunner(Class reducerClass,
+ JobConf job,
+ org.apache.hadoop.mapreduce.TaskAttemptID taskId,
+ org.apache.hadoop.mapreduce.TaskAttemptContext context,
+ Counters.Counter inputCounter,
+ TaskReporter reporter,
+ org.apache.hadoop.mapreduce.OutputCommitter committer) {
+ super(inputCounter, job, reporter);
+ this.reducerClass = reducerClass;
+ this.taskId = taskId;
+ keyClass = (Class<K>) context.getMapOutputKeyClass();
+ valueClass = (Class<V>) context.getMapOutputValueClass();
+ comparator = (RawComparator<K>) context.getSortComparator();
+ this.committer = committer;
+ }
+
+ private static class OutputConverter<K,V>
+ extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
+ OutputCollector<K,V> output;
+ OutputConverter(OutputCollector<K,V> output) {
+ this.output = output;
+ }
+
+ @Override
+ public void close(org.apache.hadoop.mapreduce.TaskAttemptContext context){
+ }
+
+ @Override
+ public void write(K key, V value
+ ) throws IOException, InterruptedException {
+ output.collect(key,value);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ void combine(RawKeyValueIterator iterator,
+ OutputCollector<K,V> collector
+ ) throws IOException, InterruptedException,
+ ClassNotFoundException {
+ // make a reducer
+ org.apache.hadoop.mapreduce.Reducer<K,V,K,V> reducer =
+ (org.apache.hadoop.mapreduce.Reducer<K,V,K,V>)
+ ReflectionUtils.newInstance(reducerClass, job);
+ org.apache.hadoop.mapreduce.Reducer.Context
+ reducerContext = createReduceContext(reducer, job, taskId,
+ iterator, null, inputCounter,
+ new OutputConverter(collector),
+ committer,
+ reporter, comparator, keyClass,
+ valueClass);
+ reducer.run(reducerContext);
+ }
+ }
+
+ BytesWritable getExtraData() {
+ return extraData;
+ }
+
+ void setExtraData(BytesWritable extraData) {
+ this.extraData = extraData;
+ }
+}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskAttemptContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskAttemptContext.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskAttemptContext.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskAttemptContext.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskAttemptContextImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskAttemptContextImpl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskAttemptContextImpl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskAttemptContextImpl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskAttemptID.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskAttemptID.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskAttemptID.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskAttemptID.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskID.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskID.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskID.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskID.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskLog.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskLogAppender.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskLogAppender.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskReport.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskReport.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskReport.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskReport.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskStatus.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskStatus.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskStatus.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskStatus.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TextInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TextInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TextOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TextOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Utils.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Utils.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Utils.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Utils.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/jobcontrol/Job.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/Job.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/jobcontrol/Job.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/Job.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/jobcontrol/JobControl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/JobControl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/jobcontrol/JobControl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/JobControl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/ArrayListBackedIterator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ArrayListBackedIterator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/ArrayListBackedIterator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ArrayListBackedIterator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/ComposableInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ComposableInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/ComposableInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ComposableInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/ComposableRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ComposableRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/ComposableRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ComposableRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/CompositeInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/CompositeInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/CompositeInputSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/CompositeInputSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/InnerJoinRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/InnerJoinRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/InnerJoinRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/InnerJoinRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/JoinRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/JoinRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/JoinRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/JoinRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/MultiFilterRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/MultiFilterRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/MultiFilterRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/MultiFilterRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/OuterJoinRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OuterJoinRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/OuterJoinRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OuterJoinRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/OverrideRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OverrideRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/OverrideRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OverrideRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/Parser.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/Parser.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/Parser.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/Parser.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/ResetableIterator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ResetableIterator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/ResetableIterator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ResetableIterator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/StreamBackedIterator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/StreamBackedIterator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/StreamBackedIterator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/StreamBackedIterator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/TupleWritable.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/TupleWritable.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/TupleWritable.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/TupleWritable.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/BinaryPartitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/BinaryPartitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/BinaryPartitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/BinaryPartitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/Chain.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/Chain.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/Chain.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/Chain.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/ChainMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/ChainMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/ChainReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/ChainReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/CombineFileSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/CombineFileSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/DelegatingInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/DelegatingInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/DelegatingInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/DelegatingInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/DelegatingMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/DelegatingMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/DelegatingMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/DelegatingMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/FilterOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/FilterOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/FilterOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/FilterOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/HashPartitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/HashPartitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/HashPartitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/HashPartitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/IdentityMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/IdentityMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/IdentityMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/IdentityMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/IdentityReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/IdentityReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/IdentityReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/IdentityReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/InputSampler.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/InputSampler.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/InputSampler.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/InputSampler.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/InverseMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/InverseMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/InverseMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/InverseMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/LazyOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/LazyOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/LazyOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/LazyOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/LongSumReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/LongSumReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/LongSumReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/LongSumReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleInputs.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleInputs.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleInputs.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleInputs.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleSequenceFileOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleSequenceFileOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleSequenceFileOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleSequenceFileOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleTextOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleTextOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/MultipleTextOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleTextOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/NLineInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/NLineInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/NLineInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/NLineInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/NullOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/NullOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/NullOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/NullOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/RegexMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/RegexMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/RegexMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/RegexMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/TaggedInputSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TaggedInputSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/TaggedInputSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TaggedInputSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/DoubleValueSum.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/DoubleValueSum.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/DoubleValueSum.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/DoubleValueSum.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMax.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMax.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMax.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMax.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMin.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMin.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/LongValueSum.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/LongValueSum.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/LongValueSum.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/LongValueSum.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMax.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMax.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMax.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMax.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMin.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMin.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/UniqValueCount.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/UniqValueCount.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/UniqValueCount.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/UniqValueCount.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/db/DBConfiguration.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBConfiguration.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/db/DBConfiguration.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBConfiguration.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/db/DBWritable.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBWritable.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/db/DBWritable.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBWritable.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/Application.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/Application.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/DownwardProtocol.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/DownwardProtocol.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/DownwardProtocol.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/DownwardProtocol.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/OutputHandler.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/OutputHandler.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/OutputHandler.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/OutputHandler.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/PipesPartitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesPartitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/PipesPartitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesPartitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/Submitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/Submitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/UpwardProtocol.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/UpwardProtocol.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/UpwardProtocol.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/UpwardProtocol.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
new file mode 100644
index 0000000..3a508ce6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -0,0 +1,391 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.ServiceLoader;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Master;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
+import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+
+/**
+ * Provides a way to access information about the map/reduce cluster.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class Cluster {
+
+ @InterfaceStability.Evolving
+ public static enum JobTrackerStatus {INITIALIZING, RUNNING};
+
+ private ClientProtocolProvider clientProtocolProvider;
+ private ClientProtocol client;
+ private UserGroupInformation ugi;
+ private Configuration conf;
+ private FileSystem fs = null;
+ private Path sysDir = null;
+ private Path stagingAreaDir = null;
+ private Path jobHistoryDir = null;
+
+ static {
+ ConfigUtil.loadResources();
+ }
+
+ public Cluster(Configuration conf) throws IOException {
+ this.conf = conf;
+ this.ugi = UserGroupInformation.getCurrentUser();
+ for (ClientProtocolProvider provider : ServiceLoader.load(ClientProtocolProvider.class)) {
+ ClientProtocol clientProtocol = provider.create(conf);
+ if (clientProtocol != null) {
+ clientProtocolProvider = provider;
+ client = clientProtocol;
+ break;
+ }
+ }
+ }
+
+ public Cluster(InetSocketAddress jobTrackAddr, Configuration conf)
+ throws IOException {
+ this.conf = conf;
+ this.ugi = UserGroupInformation.getCurrentUser();
+ for (ClientProtocolProvider provider : ServiceLoader.load(ClientProtocolProvider.class)) {
+ ClientProtocol clientProtocol = provider.create(jobTrackAddr, conf);
+ if (clientProtocol != null) {
+ clientProtocolProvider = provider;
+ client = clientProtocol;
+ break;
+ }
+ }
+ }
+
+ ClientProtocol getClient() {
+ return client;
+ }
+
+ Configuration getConf() {
+ return conf;
+ }
+
+ /**
+ * Close the <code>Cluster</code>.
+ */
+ public synchronized void close() throws IOException {
+ clientProtocolProvider.close(client);
+ }
+
+ private Job[] getJobs(JobStatus[] stats) throws IOException {
+ List<Job> jobs = new ArrayList<Job>();
+ for (JobStatus stat : stats) {
+ jobs.add(new Job(this, stat, new JobConf(stat.getJobFile())));
+ }
+ return jobs.toArray(new Job[0]);
+ }
+
+ /**
+ * Get the file system where job-specific files are stored
+ *
+ * @return object of FileSystem
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public synchronized FileSystem getFileSystem()
+ throws IOException, InterruptedException {
+ if (this.fs == null) {
+ try {
+ this.fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ public FileSystem run() throws IOException, InterruptedException {
+ final Path sysDir = new Path(client.getSystemDir());
+ return sysDir.getFileSystem(getConf());
+ }
+ });
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ return fs;
+ }
+
+ /**
+ * Get job corresponding to jobid.
+ *
+ * @param jobId
+ * @return object of {@link Job}
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public Job getJob(JobID jobId) throws IOException, InterruptedException {
+ JobStatus status = client.getJobStatus(jobId);
+ if (status != null) {
+ return new Job(this, status, new JobConf(status.getJobFile()));
+ }
+ return null;
+ }
+
+ /**
+ * Get all the queues in cluster.
+ *
+ * @return array of {@link QueueInfo}
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public QueueInfo[] getQueues() throws IOException, InterruptedException {
+ return client.getQueues();
+ }
+
+ /**
+ * Get queue information for the specified name.
+ *
+ * @param name queuename
+ * @return object of {@link QueueInfo}
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public QueueInfo getQueue(String name)
+ throws IOException, InterruptedException {
+ return client.getQueue(name);
+ }
+
+ /**
+ * Get current cluster status.
+ *
+ * @return object of {@link ClusterMetrics}
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public ClusterMetrics getClusterStatus() throws IOException, InterruptedException {
+ return client.getClusterMetrics();
+ }
+
+ /**
+ * Get all active trackers in the cluster.
+ *
+ * @return array of {@link TaskTrackerInfo}
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public TaskTrackerInfo[] getActiveTaskTrackers()
+ throws IOException, InterruptedException {
+ return client.getActiveTrackers();
+ }
+
+ /**
+ * Get blacklisted trackers.
+ *
+ * @return array of {@link TaskTrackerInfo}
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public TaskTrackerInfo[] getBlackListedTaskTrackers()
+ throws IOException, InterruptedException {
+ return client.getBlacklistedTrackers();
+ }
+
+ /**
+ * Get all the jobs in cluster.
+ *
+ * @return array of {@link Job}
+ * @throws IOException
+ * @throws InterruptedException
+ * @deprecated Use {@link #getAllJobStatuses()} instead.
+ */
+ @Deprecated
+ public Job[] getAllJobs() throws IOException, InterruptedException {
+ return getJobs(client.getAllJobs());
+ }
+
+ /**
+ * Get job status for all jobs in the cluster.
+ * @return job status for all jobs in cluster
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public JobStatus[] getAllJobStatuses() throws IOException, InterruptedException {
+ return client.getAllJobs();
+ }
+
+ /**
+ * Grab the jobtracker system directory path where
+ * job-specific files will be placed.
+ *
+ * @return the system directory where job-specific files are to be placed.
+ */
+ public Path getSystemDir() throws IOException, InterruptedException {
+ if (sysDir == null) {
+ sysDir = new Path(client.getSystemDir());
+ }
+ return sysDir;
+ }
+
+ /**
+ * Grab the jobtracker's view of the staging directory path where
+ * job-specific files will be placed.
+ *
+ * @return the staging directory where job-specific files are to be placed.
+ */
+ public Path getStagingAreaDir() throws IOException, InterruptedException {
+ if (stagingAreaDir == null) {
+ stagingAreaDir = new Path(client.getStagingAreaDir());
+ }
+ return stagingAreaDir;
+ }
+
+ /**
+ * Get the job history file path for a given job id. The job history file at
+ * this path may or may not exist, depending on the job's completion state;
+ * the file is present only for completed jobs.
+ * @param jobId the JobID of the job submitted by the current user.
+ * @return the file path of the job history file
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public String getJobHistoryUrl(JobID jobId) throws IOException,
+ InterruptedException {
+ if (jobHistoryDir == null) {
+ jobHistoryDir = new Path(client.getJobHistoryDir());
+ }
+ return new Path(jobHistoryDir, jobId.toString() + "_"
+ + ugi.getShortUserName()).toString();
+ }
+
+ /**
+ * Gets the queue ACLs for the current user.
+ * @return array of QueueAclsInfo objects for the current user.
+ * @throws IOException
+ */
+ public QueueAclsInfo[] getQueueAclsForCurrentUser()
+ throws IOException, InterruptedException {
+ return client.getQueueAclsForCurrentUser();
+ }
+
+ /**
+ * Gets the root-level queues.
+ * @return array of QueueInfo objects.
+ * @throws IOException
+ */
+ public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
+ return client.getRootQueues();
+ }
+
+ /**
+ * Returns the immediate children of queueName.
+ * @param queueName
+ * @return array of QueueInfo objects that are children of queueName
+ * @throws IOException
+ */
+ public QueueInfo[] getChildQueues(String queueName)
+ throws IOException, InterruptedException {
+ return client.getChildQueues(queueName);
+ }
+
+ /**
+ * Get the JobTracker's status.
+ *
+ * @return {@link JobTrackerStatus} of the JobTracker
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public JobTrackerStatus getJobTrackerStatus() throws IOException,
+ InterruptedException {
+ return client.getJobTrackerStatus();
+ }
+
+ /**
+ * Get the tasktracker expiry interval for the cluster
+ * @return the expiry interval in msec
+ */
+ public long getTaskTrackerExpiryInterval() throws IOException,
+ InterruptedException {
+ return client.getTaskTrackerExpiryInterval();
+ }
+
+ /**
+ * Get a delegation token for the user from the JobTracker.
+ * @param renewer the user who can renew the token
+ * @return the new token
+ * @throws IOException
+ */
+ public Token<DelegationTokenIdentifier>
+ getDelegationToken(Text renewer) throws IOException, InterruptedException{
+ Token<DelegationTokenIdentifier> result =
+ client.getDelegationToken(renewer);
+ InetSocketAddress addr = Master.getMasterAddress(conf);
+ StringBuilder service = new StringBuilder();
+ service.append(NetUtils.normalizeHostName(addr.getAddress().
+ getHostAddress()));
+ service.append(':');
+ service.append(addr.getPort());
+ result.setService(new Text(service.toString()));
+ return result;
+ }
+
+ /**
+ * Renew a delegation token
+ * @param token the token to renew
+ * @return the new expiration time
+ * @throws InvalidToken
+ * @throws IOException
+ */
+ public long renewDelegationToken(Token<DelegationTokenIdentifier> token
+ ) throws InvalidToken, IOException,
+ InterruptedException {
+ try {
+ return client.renewDelegationToken(token);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(InvalidToken.class,
+ AccessControlException.class);
+ }
+ }
+
+ /**
+ * Cancel a delegation token from the JobTracker
+ * @param token the token to cancel
+ * @throws IOException
+ */
+ public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
+ ) throws IOException,
+ InterruptedException {
+ try {
+ client.cancelDelegationToken(token);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(InvalidToken.class,
+ AccessControlException.class);
+ }
+ }
+
+}
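The new Cluster class above is the client-side entry point: it discovers a
ClientProtocolProvider via ServiceLoader in its constructor and delegates every
query to the resulting ClientProtocol, so swapping between the classic
JobTracker and YARN runtimes needs only a different provider on the classpath.
A minimal driver sketch (hypothetical class name ClusterInfo, not part of this
patch; assumes a provider and the usual *-site.xml files are on the classpath):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Cluster;
    import org.apache.hadoop.mapreduce.JobStatus;

    public class ClusterInfo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();  // reads *-site.xml from the classpath
        Cluster cluster = new Cluster(conf);       // binds to the first provider that answers
        try {
          // Walk every job the cluster knows about via the new status API.
          for (JobStatus status : cluster.getAllJobStatuses()) {
            System.out.println(status.getJobID() + " -> " + status.getState());
          }
        } finally {
          cluster.close();  // lets the provider shut down the ClientProtocol
        }
      }
    }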
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/ClusterMetrics.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ClusterMetrics.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/ClusterMetrics.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ClusterMetrics.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/Counter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Counter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/Counter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Counter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/CounterGroup.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CounterGroup.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/CounterGroup.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CounterGroup.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/Counters.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Counters.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/Counters.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Counters.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/FileSystemCounter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/FileSystemCounter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/FileSystemCounter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/FileSystemCounter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/ID.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ID.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/ID.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ID.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/InputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/InputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/InputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/InputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/InputSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/InputSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/InputSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/InputSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/Job.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobACL.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobACL.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/JobACL.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobACL.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobContext.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/JobContext.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobContext.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java
new file mode 100644
index 0000000..784e1d5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+// Per-job counters
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum JobCounter {
+ NUM_FAILED_MAPS,
+ NUM_FAILED_REDUCES,
+ TOTAL_LAUNCHED_MAPS,
+ TOTAL_LAUNCHED_REDUCES,
+ OTHER_LOCAL_MAPS,
+ DATA_LOCAL_MAPS,
+ RACK_LOCAL_MAPS,
+ SLOTS_MILLIS_MAPS,
+ SLOTS_MILLIS_REDUCES,
+ FALLOW_SLOTS_MILLIS_MAPS,
+ FALLOW_SLOTS_MILLIS_REDUCES,
+ TOTAL_LAUNCHED_UBERTASKS,
+ NUM_UBER_SUBMAPS,
+ NUM_UBER_SUBREDUCES,
+ NUM_FAILED_UBERTASKS
+}
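JobCounter enumerates the framework-maintained per-job counters, including the
new uber-task counters. A hedged sketch of reading one of them for a finished
job through the Cluster and Job APIs in this patch (hypothetical class name
LaunchedMaps; the job id argument is purely illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Cluster;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.JobCounter;
    import org.apache.hadoop.mapreduce.JobID;

    public class LaunchedMaps {
      public static void main(String[] args) throws Exception {
        Cluster cluster = new Cluster(new Configuration());
        try {
          // args[0] is a job id string such as job_201108151234_0001 (illustrative).
          Job job = cluster.getJob(JobID.forName(args[0]));
          if (job != null) {
            // TOTAL_LAUNCHED_MAPS is incremented by the framework, not by user code.
            long launched = job.getCounters()
                .findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue();
            System.out.println("launched map attempts: " + launched);
          }
        } finally {
          cluster.close();
        }
      }
    }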
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobID.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/JobID.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobPriority.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobPriority.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/JobPriority.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobPriority.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobStatus.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/JobStatus.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
new file mode 100644
index 0000000..0225182
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Placeholder for cluster-level configuration keys.
+ *
+ * These keys are used by both {@link JobTracker} and {@link TaskTracker}. The
+ * keys should have "mapreduce.cluster." as the prefix.
+ *
+ */
+@InterfaceAudience.Private
+public interface MRConfig {
+
+ // Cluster-level configuration parameters
+ public static final String TEMP_DIR = "mapreduce.cluster.temp.dir";
+ public static final String LOCAL_DIR = "mapreduce.cluster.local.dir";
+ public static final String MAPMEMORY_MB = "mapreduce.cluster.mapmemory.mb";
+ public static final String REDUCEMEMORY_MB =
+ "mapreduce.cluster.reducememory.mb";
+ public static final String MR_ACLS_ENABLED = "mapreduce.cluster.acls.enabled";
+ public static final String MR_ADMINS =
+ "mapreduce.cluster.administrators";
+ @Deprecated
+ public static final String MR_SUPERGROUP =
+ "mapreduce.cluster.permissions.supergroup";
+
+ //Delegation token related keys
+ public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY =
+ "mapreduce.cluster.delegation.key.update-interval";
+ public static final long DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT =
+ 24*60*60*1000; // 1 day
+ public static final String DELEGATION_TOKEN_RENEW_INTERVAL_KEY =
+ "mapreduce.cluster.delegation.token.renew-interval";
+ public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT =
+ 24*60*60*1000; // 1 day
+ public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY =
+ "mapreduce.cluster.delegation.token.max-lifetime";
+ public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT =
+ 7*24*60*60*1000; // 7 days
+
+ public static final String RESOURCE_CALCULATOR_PLUGIN =
+ "mapreduce.job.resourcecalculatorplugin";
+ public static final String STATIC_RESOLUTIONS =
+ "mapreduce.job.net.static.resolutions";
+
+ public static final String MASTER_ADDRESS = "mapreduce.jobtracker.address";
+ public static final String MASTER_USER_NAME =
+ "mapreduce.jobtracker.kerberos.principal";
+
+ public static final String FRAMEWORK_NAME = "mapreduce.framework.name";
+ public static final String TASK_LOCAL_OUTPUT_CLASS =
+ "mapreduce.task.local.output.class";
+}
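MRConfig centralizes the cluster-level key names so call sites stop hard-coding
"mapreduce.cluster.*" strings. A small sketch of how a client might read them
(hypothetical class name ShowClusterConf; values come from whatever the loaded
Configuration supplies):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRConfig;

    public class ShowClusterConf {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Constants replace bare key strings scattered through the code base.
        System.out.println(MRConfig.FRAMEWORK_NAME + " = "
            + conf.get(MRConfig.FRAMEWORK_NAME));
        System.out.println(MRConfig.LOCAL_DIR + " = " + conf.get(MRConfig.LOCAL_DIR));
        System.out.println(MRConfig.MR_ACLS_ENABLED + " = "
            + conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false));
      }
    }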
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
new file mode 100644
index 0000000..55ab70f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface MRJobConfig {
+
+ // Put all of the attribute names in here so that Job and JobContext are
+ // consistent.
+ public static final String INPUT_FORMAT_CLASS_ATTR = "mapreduce.job.inputformat.class";
+
+ public static final String MAP_CLASS_ATTR = "mapreduce.job.map.class";
+
+ public static final String COMBINE_CLASS_ATTR = "mapreduce.job.combine.class";
+
+ public static final String REDUCE_CLASS_ATTR = "mapreduce.job.reduce.class";
+
+ public static final String OUTPUT_FORMAT_CLASS_ATTR = "mapreduce.job.outputformat.class";
+
+ public static final String PARTITIONER_CLASS_ATTR = "mapreduce.job.partitioner.class";
+
+ public static final String SETUP_CLEANUP_NEEDED = "mapreduce.job.committer.setup.cleanup.needed";
+
+ public static final String TASK_CLEANUP_NEEDED = "mapreduce.job.committer.task.cleanup.needed";
+
+ public static final String JAR = "mapreduce.job.jar";
+
+ public static final String ID = "mapreduce.job.id";
+
+ public static final String JOB_NAME = "mapreduce.job.name";
+
+ public static final String JAR_UNPACK_PATTERN = "mapreduce.job.jar.unpack.pattern";
+
+ public static final String USER_NAME = "mapreduce.job.user.name";
+
+ public static final String PRIORITY = "mapreduce.job.priority";
+
+ public static final String QUEUE_NAME = "mapreduce.job.queuename";
+
+ public static final String JVM_NUMTASKS_TORUN = "mapreduce.job.jvm.numtasks";
+
+ public static final String SPLIT_FILE = "mapreduce.job.splitfile";
+
+ public static final String NUM_MAPS = "mapreduce.job.maps";
+
+ public static final String MAX_TASK_FAILURES_PER_TRACKER = "mapreduce.job.maxtaskfailures.per.tracker";
+
+ public static final String COMPLETED_MAPS_FOR_REDUCE_SLOWSTART = "mapreduce.job.reduce.slowstart.completedmaps";
+
+ public static final String NUM_REDUCES = "mapreduce.job.reduces";
+
+ public static final String SKIP_RECORDS = "mapreduce.job.skiprecords";
+
+ public static final String SKIP_OUTDIR = "mapreduce.job.skip.outdir";
+
+ public static final String SPECULATIVE_SLOWNODE_THRESHOLD = "mapreduce.job.speculative.slownodethreshold";
+
+ public static final String SPECULATIVE_SLOWTASK_THRESHOLD = "mapreduce.job.speculative.slowtaskthreshold";
+
+ public static final String SPECULATIVECAP = "mapreduce.job.speculative.speculativecap";
+
+ public static final String JOB_LOCAL_DIR = "mapreduce.job.local.dir";
+
+ public static final String OUTPUT_KEY_CLASS = "mapreduce.job.output.key.class";
+
+ public static final String OUTPUT_VALUE_CLASS = "mapreduce.job.output.value.class";
+
+ public static final String KEY_COMPARATOR = "mapreduce.job.output.key.comparator.class";
+
+ public static final String GROUP_COMPARATOR_CLASS = "mapreduce.job.output.group.comparator.class";
+
+ public static final String WORKING_DIR = "mapreduce.job.working.dir";
+
+ public static final String END_NOTIFICATION_URL = "mapreduce.job.end-notification.url";
+
+ public static final String END_NOTIFICATION_RETRIES = "mapreduce.job.end-notification.retry.attempts";
+
+ public static final String END_NOTIFICATION_RETRIE_INTERVAL = "mapreduce.job.end-notification.retry.interval";
+
+ public static final String CLASSPATH_ARCHIVES = "mapreduce.job.classpath.archives";
+
+ public static final String CLASSPATH_FILES = "mapreduce.job.classpath.files";
+
+ public static final String CACHE_FILES = "mapreduce.job.cache.files";
+
+ public static final String CACHE_ARCHIVES = "mapreduce.job.cache.archives";
+
+ public static final String CACHE_FILES_SIZES = "mapreduce.job.cache.files.filesizes"; // internal use only
+
+ public static final String CACHE_ARCHIVES_SIZES = "mapreduce.job.cache.archives.filesizes"; // ditto
+
+ public static final String CACHE_LOCALFILES = "mapreduce.job.cache.local.files";
+
+ public static final String CACHE_LOCALARCHIVES = "mapreduce.job.cache.local.archives";
+
+ public static final String CACHE_FILE_TIMESTAMPS = "mapreduce.job.cache.files.timestamps";
+
+ public static final String CACHE_ARCHIVES_TIMESTAMPS = "mapreduce.job.cache.archives.timestamps";
+
+ public static final String CACHE_FILE_VISIBILITIES = "mapreduce.job.cache.files.visibilities";
+
+ public static final String CACHE_ARCHIVES_VISIBILITIES = "mapreduce.job.cache.archives.visibilities";
+
+ public static final String CACHE_SYMLINK = "mapreduce.job.cache.symlink.create";
+
+ public static final String USER_LOG_RETAIN_HOURS = "mapreduce.job.userlog.retain.hours";
+
+ public static final String IO_SORT_FACTOR = "mapreduce.task.io.sort.factor";
+
+ public static final String IO_SORT_MB = "mapreduce.task.io.sort.mb";
+
+ public static final String INDEX_CACHE_MEMORY_LIMIT = "mapreduce.task.index.cache.limit.bytes";
+
+ public static final String PRESERVE_FAILED_TASK_FILES = "mapreduce.task.files.preserve.failedtasks";
+
+ public static final String PRESERVE_FILES_PATTERN = "mapreduce.task.files.preserve.filepattern";
+
+ public static final String TASK_TEMP_DIR = "mapreduce.task.tmp.dir";
+
+ public static final String TASK_DEBUGOUT_LINES = "mapreduce.task.debugout.lines";
+
+ public static final String RECORDS_BEFORE_PROGRESS = "mapreduce.task.merge.progress.records";
+
+ public static final String SKIP_START_ATTEMPTS = "mapreduce.task.skip.start.attempts";
+
+ public static final String TASK_ATTEMPT_ID = "mapreduce.task.attempt.id";
+
+ public static final String TASK_ISMAP = "mapreduce.task.ismap";
+
+ public static final String TASK_PARTITION = "mapreduce.task.partition";
+
+ public static final String TASK_PROFILE = "mapreduce.task.profile";
+
+ public static final String TASK_PROFILE_PARAMS = "mapreduce.task.profile.params";
+
+ public static final String NUM_MAP_PROFILES = "mapreduce.task.profile.maps";
+
+ public static final String NUM_REDUCE_PROFILES = "mapreduce.task.profile.reduces";
+
+ public static final String TASK_TIMEOUT = "mapreduce.task.timeout";
+
+ public static final String TASK_ID = "mapreduce.task.id";
+
+ public static final String TASK_OUTPUT_DIR = "mapreduce.task.output.dir";
+
+ public static final String TASK_USERLOG_LIMIT = "mapreduce.task.userlog.limit.kb";
+
+ public static final String MAP_SORT_SPILL_PERCENT = "mapreduce.map.sort.spill.percent";
+
+ public static final String MAP_INPUT_FILE = "mapreduce.map.input.file";
+
+ public static final String MAP_INPUT_PATH = "mapreduce.map.input.length";
+
+ public static final String MAP_INPUT_START = "mapreduce.map.input.start";
+
+ public static final String MAP_MEMORY_MB = "mapreduce.map.memory.mb";
+
+ public static final String MAP_MEMORY_PHYSICAL_MB = "mapreduce.map.memory.physical.mb";
+
+ public static final String MAP_ENV = "mapreduce.map.env";
+
+ public static final String MAP_JAVA_OPTS = "mapreduce.map.java.opts";
+
+ public static final String MAP_ULIMIT = "mapreduce.map.ulimit";
+
+ public static final String MAP_MAX_ATTEMPTS = "mapreduce.map.maxattempts";
+
+ public static final String MAP_DEBUG_SCRIPT = "mapreduce.map.debug.script";
+
+ public static final String MAP_SPECULATIVE = "mapreduce.map.speculative";
+
+ public static final String MAP_FAILURES_MAX_PERCENT = "mapreduce.map.failures.maxpercent";
+
+ public static final String MAP_SKIP_INCR_PROC_COUNT = "mapreduce.map.skip.proc-count.auto-incr";
+
+ public static final String MAP_SKIP_MAX_RECORDS = "mapreduce.map.skip.maxrecords";
+
+ public static final String MAP_COMBINE_MIN_SPILLS = "mapreduce.map.combine.minspills";
+
+ public static final String MAP_OUTPUT_COMPRESS = "mapreduce.map.output.compress";
+
+ public static final String MAP_OUTPUT_COMPRESS_CODEC = "mapreduce.map.output.compress.codec";
+
+ public static final String MAP_OUTPUT_KEY_CLASS = "mapreduce.map.output.key.class";
+
+ public static final String MAP_OUTPUT_VALUE_CLASS = "mapreduce.map.output.value.class";
+
+ public static final String MAP_OUTPUT_KEY_FIELD_SEPERATOR = "mapreduce.map.output.key.field.separator";
+
+ public static final String MAP_LOG_LEVEL = "mapreduce.map.log.level";
+
+ public static final String REDUCE_LOG_LEVEL = "mapreduce.reduce.log.level";
+
+ public static final String REDUCE_MERGE_INMEM_THRESHOLD = "mapreduce.reduce.merge.inmem.threshold";
+
+ public static final String REDUCE_INPUT_BUFFER_PERCENT = "mapreduce.reduce.input.buffer.percent";
+
+ public static final String REDUCE_MARKRESET_BUFFER_PERCENT = "mapreduce.reduce.markreset.buffer.percent";
+
+ public static final String REDUCE_MARKRESET_BUFFER_SIZE = "mapreduce.reduce.markreset.buffer.size";
+
+ public static final String REDUCE_MEMORY_PHYSICAL_MB = "mapreduce.reduce.memory.physical.mb";
+
+ public static final String REDUCE_MEMORY_MB = "mapreduce.reduce.memory.mb";
+
+ public static final String REDUCE_MEMORY_TOTAL_BYTES = "mapreduce.reduce.memory.totalbytes";
+
+ public static final String SHUFFLE_INPUT_BUFFER_PERCENT = "mapreduce.reduce.shuffle.input.buffer.percent";
+
+ public static final String SHUFFLE_MERGE_EPRCENT = "mapreduce.reduce.shuffle.merge.percent";
+
+ public static final String REDUCE_FAILURES_MAXPERCENT = "mapreduce.reduce.failures.maxpercent";
+
+ public static final String REDUCE_ENV = "mapreduce.reduce.env";
+
+ public static final String REDUCE_JAVA_OPTS = "mapreduce.reduce.java.opts";
+
+ public static final String REDUCE_ULIMIT = "mapreduce.reduce.ulimit";
+
+ public static final String REDUCE_MAX_ATTEMPTS = "mapreduce.reduce.maxattempts";
+
+ public static final String SHUFFLE_PARALLEL_COPIES = "mapreduce.reduce.shuffle.parallelcopies";
+
+ public static final String REDUCE_DEBUG_SCRIPT = "mapreduce.reduce.debug.script";
+
+ public static final String REDUCE_SPECULATIVE = "mapreduce.reduce.speculative";
+
+ public static final String SHUFFLE_CONNECT_TIMEOUT = "mapreduce.reduce.shuffle.connect.timeout";
+
+ public static final String SHUFFLE_READ_TIMEOUT = "mapreduce.reduce.shuffle.read.timeout";
+
+ public static final String SHUFFLE_FETCH_FAILURES = "mapreduce.reduce.shuffle.maxfetchfailures";
+
+ public static final String SHUFFLE_NOTIFY_READERROR = "mapreduce.reduce.shuffle.notify.readerror";
+
+ public static final String REDUCE_SKIP_INCR_PROC_COUNT = "mapreduce.reduce.skip.proc-count.auto-incr";
+
+ public static final String REDUCE_SKIP_MAXGROUPS = "mapreduce.reduce.skip.maxgroups";
+
+ public static final String REDUCE_MEMTOMEM_THRESHOLD = "mapreduce.reduce.merge.memtomem.threshold";
+
+ public static final String REDUCE_MEMTOMEM_ENABLED = "mapreduce.reduce.merge.memtomem.enabled";
+
+ public static final String COMBINE_RECORDS_BEFORE_PROGRESS = "mapreduce.task.combine.progress.records";
+
+ public static final String JOB_NAMENODES = "mapreduce.job.hdfs-servers";
+
+ public static final String JOB_JOBTRACKER_ID = "mapreduce.job.kerberos.jtprinicipal";
+
+ public static final String JOB_CANCEL_DELEGATION_TOKEN = "mapreduce.job.complete.cancel.delegation.tokens";
+
+ public static final String JOB_ACL_VIEW_JOB = "mapreduce.job.acl-view-job";
+
+ public static final String JOB_ACL_MODIFY_JOB = "mapreduce.job.acl-modify-job";
+ public static final String JOB_SUBMITHOST =
+ "mapreduce.job.submithostname";
+ public static final String JOB_SUBMITHOSTADDR =
+ "mapreduce.job.submithostaddress";
+
+ public static final String COUNTERS_MAX_KEY = "mapreduce.job.counters.max";
+ public static final int COUNTERS_MAX_DEFAULT = 120;
+
+ public static final String COUNTER_GROUP_NAME_MAX_KEY = "mapreduce.job.counters.group.name.max";
+ public static final int COUNTER_GROUP_NAME_MAX_DEFAULT = 128;
+
+ public static final String COUNTER_NAME_MAX_KEY = "mapreduce.job.counters.counter.name.max";
+ public static final int COUNTER_NAME_MAX_DEFAULT = 64;
+
+ public static final String COUNTER_GROUPS_MAX_KEY = "mapreduce.job.counters.groups.max";
+ public static final int COUNTER_GROUPS_MAX_DEFAULT = 50;
+ public static final String JOB_UBERTASK_ENABLE =
+ "mapreduce.job.ubertask.enable";
+ public static final String JOB_UBERTASK_MAXMAPS =
+ "mapreduce.job.ubertask.maxmaps";
+ public static final String JOB_UBERTASK_MAXREDUCES =
+ "mapreduce.job.ubertask.maxreduces";
+ public static final String JOB_UBERTASK_MAXBYTES =
+ "mapreduce.job.ubertask.maxbytes";
+ public static final String UBERTASK_JAVA_OPTS =
+ "mapreduce.ubertask.child.java.opts"; // or mapreduce.uber.java.opts?
+ public static final String UBERTASK_ULIMIT =
+ "mapreduce.ubertask.child.ulimit"; // or mapreduce.uber.ulimit?
+ public static final String UBERTASK_ENV =
+ "mapreduce.ubertask.child.env"; // or mapreduce.uber.env?
+}
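
The constants above are ordinary Hadoop Configuration keys. As a minimal, illustrative sketch (the class and key names come from MRJobConfig above; the values, the class name MRJobConfigSketch, and the job name are made-up examples, not part of this patch), a client could set a few of them like this:

// Sketch only: setting a few MRJobConfig keys on a job configuration.
// Values here are arbitrary examples.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class MRJobConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.NUM_REDUCES, 4);             // mapreduce.job.reduces
    conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false); // mapreduce.map.speculative
    conf.set(MRJobConfig.QUEUE_NAME, "default");         // mapreduce.job.queuename
    conf.setInt(MRJobConfig.IO_SORT_MB, 256);            // mapreduce.task.io.sort.mb
    Job job = new Job(conf, "config-sketch");            // sets mapreduce.job.name
    System.out.println(job.getConfiguration().get(MRJobConfig.NUM_REDUCES));
  }
}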
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/MapContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MapContext.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/MapContext.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MapContext.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/Mapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Mapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/Mapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Mapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/MarkableIterator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MarkableIterator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/MarkableIterator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MarkableIterator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/MarkableIteratorInterface.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MarkableIteratorInterface.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/MarkableIteratorInterface.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MarkableIteratorInterface.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/OutputCommitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/OutputCommitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/OutputCommitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/OutputCommitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/OutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/OutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/OutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/OutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/Partitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Partitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/Partitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Partitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/QueueAclsInfo.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueAclsInfo.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/QueueAclsInfo.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueAclsInfo.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/QueueInfo.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueInfo.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/QueueInfo.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueInfo.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/QueueState.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueState.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/QueueState.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueState.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/RecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/RecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/RecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/RecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/RecordWriter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/RecordWriter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/RecordWriter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/RecordWriter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/ReduceContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ReduceContext.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/ReduceContext.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ReduceContext.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/Reducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Reducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/Reducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Reducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/StatusReporter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/StatusReporter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/StatusReporter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/StatusReporter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskAttemptContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptContext.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskAttemptContext.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptContext.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskAttemptID.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptID.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskAttemptID.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptID.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskCounter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCounter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskCounter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCounter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskID.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskID.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskInputOutputContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskInputOutputContext.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskInputOutputContext.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskInputOutputContext.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskReport.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskReport.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskReport.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskReport.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskTrackerInfo.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskTrackerInfo.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskTrackerInfo.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskTrackerInfo.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskType.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskType.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskType.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskType.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/AbstractCounterGroup.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounterGroup.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/AbstractCounterGroup.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounterGroup.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/CounterGroupBase.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupBase.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/CounterGroupBase.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupBase.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/GenericCounter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/GenericCounter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/GenericCounter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/GenericCounter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/LimitExceededException.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/LimitExceededException.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/LimitExceededException.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/LimitExceededException.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/Limits.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/Limits.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/counters/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/counters/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
new file mode 100644
index 0000000..8b1d3a6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
@@ -0,0 +1,460 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.filecache;
+
+import java.io.*;
+import java.util.*;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.*;
+import org.apache.hadoop.util.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+
+import java.net.URI;
+
+/**
+ * Distribute application-specific large, read-only files efficiently.
+ *
+ * <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ * framework to cache files (text, archives, jars etc.) needed by applications.
+ * </p>
+ *
+ * <p>Applications specify the files to be cached via urls (hdfs:// or http://)
+ * in the {@link org.apache.hadoop.mapred.JobConf}. The
+ * <code>DistributedCache</code> assumes that the files specified via urls are
+ * already present on the {@link FileSystem} at the path specified by the url
+ * and are accessible by every machine in the cluster.</p>
+ *
+ * <p>The framework will copy the necessary files on to the slave node before
+ * any tasks for the job are executed on that node. Its efficiency stems from
+ * the fact that the files are only copied once per job and the ability to
+ * cache archives which are un-archived on the slaves.</p>
+ *
+ * <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ * data/text files and/or more complex types such as archives, jars etc.
+ * Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
+ * Jars may be optionally added to the classpath of the tasks, a rudimentary
+ * software distribution mechanism. Files have execution permissions.
+ * Optionally users can also direct it to symlink the distributed cache file(s)
+ * into the working directory of the task.</p>
+ *
+ * <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ * files. Clearly the cache files should not be modified by the application
+ * or externally while the job is executing.</p>
+ *
+ * <p>Here is an illustrative example of how to use the
+ * <code>DistributedCache</code>:</p>
+ * <p><blockquote><pre>
+ * // Setting up the cache for the application
+ *
+ * 1. Copy the requisite files to the <code>FileSystem</code>:
+ *
+ * $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+ * $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+ * $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+ * $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+ * $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+ * $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+ *
+ * 2. Setup the application's <code>JobConf</code>:
+ *
+ * JobConf job = new JobConf();
+ * DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+ * job);
+ * DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
+ * DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+ * DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
+ * DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
+ * DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
+ *
+ * 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
+ * or {@link org.apache.hadoop.mapred.Reducer}:
+ *
+ * public static class MapClass extends MapReduceBase
+ * implements Mapper<K, V, K, V> {
+ *
+ * private Path[] localArchives;
+ * private Path[] localFiles;
+ *
+ * public void configure(JobConf job) {
+ * // Get the cached archives/files
+ * localArchives = DistributedCache.getLocalCacheArchives(job);
+ * localFiles = DistributedCache.getLocalCacheFiles(job);
+ * }
+ *
+ * public void map(K key, V value,
+ * OutputCollector<K, V> output, Reporter reporter)
+ * throws IOException {
+ * // Use data from the cached archives/files here
+ * // ...
+ * // ...
+ * output.collect(k, v);
+ * }
+ * }
+ *
+ * </pre></blockquote></p>
+ *
+ * It is also common to configure the DistributedCache through
+ * {@link org.apache.hadoop.util.GenericOptionsParser}.
+ *
+ * This class includes methods that should be used by users
+ * (specifically those mentioned in the example above, as well
+ * as {@link DistributedCache#addArchiveToClassPath(Path, Configuration)}),
+ * as well as methods intended for use by the MapReduce framework
+ * (e.g., {@link org.apache.hadoop.mapred.JobClient}).
+ *
+ * @see org.apache.hadoop.mapred.JobConf
+ * @see org.apache.hadoop.mapred.JobClient
+ */
+@Deprecated
+@InterfaceAudience.Private
+public class DistributedCache {
+
+ /**
+ * Set the configuration with the given set of archives. Intended
+ * to be used by user code.
+ * @param archives The list of archives that need to be localized
+ * @param conf Configuration which will be changed
+ * @deprecated Use {@link Job#setCacheArchives(URI[])} instead
+ */
+ @Deprecated
+ public static void setCacheArchives(URI[] archives, Configuration conf) {
+ String sarchives = StringUtils.uriToString(archives);
+ conf.set(MRJobConfig.CACHE_ARCHIVES, sarchives);
+ }
+
+ /**
+ * Set the configuration with the given set of files. Intended to be
+ * used by user code.
+ * @param files The list of files that need to be localized
+ * @param conf Configuration which will be changed
+ * @deprecated Use {@link Job#setCacheFiles(URI[])} instead
+ */
+ @Deprecated
+ public static void setCacheFiles(URI[] files, Configuration conf) {
+ String sfiles = StringUtils.uriToString(files);
+ conf.set(MRJobConfig.CACHE_FILES, sfiles);
+ }
+
+ /**
+ * Get cache archives set in the Configuration. Used by
+ * internal DistributedCache and MapReduce code.
+ * @param conf The configuration which contains the archives
+ * @return A URI array of the caches set in the Configuration
+ * @throws IOException
+ * @deprecated Use {@link JobContext#getCacheArchives()} instead
+ */
+ @Deprecated
+ public static URI[] getCacheArchives(Configuration conf) throws IOException {
+ return StringUtils.stringToURI(conf.getStrings(MRJobConfig.CACHE_ARCHIVES));
+ }
+
+ /**
+ * Get cache files set in the Configuration. Used by internal
+ * DistributedCache and MapReduce code.
+ * @param conf The configuration which contains the files
+ * @return A URI array of the files set in the Configuration
+ * @throws IOException
+ * @deprecated Use {@link JobContext#getCacheFiles()} instead
+ */
+ @Deprecated
+ public static URI[] getCacheFiles(Configuration conf) throws IOException {
+ return StringUtils.stringToURI(conf.getStrings(MRJobConfig.CACHE_FILES));
+ }
+
+ /**
+ * Return the path array of the localized caches. Intended to be used
+ * by user code.
+ * @param conf Configuration that contains the localized archives
+ * @return A path array of localized caches
+ * @throws IOException
+ * @deprecated Use {@link JobContext#getLocalCacheArchives()} instead
+ */
+ @Deprecated
+ public static Path[] getLocalCacheArchives(Configuration conf)
+ throws IOException {
+ return StringUtils.stringToPath(conf
+ .getStrings(MRJobConfig.CACHE_LOCALARCHIVES));
+ }
+
+ /**
+ * Return the path array of the localized files. Intended to be used
+ * by user code.
+ * @param conf Configuration that contains the localized files
+ * @return A path array of localized files
+ * @throws IOException
+ * @deprecated Use {@link JobContext#getLocalCacheFiles()} instead
+ */
+ @Deprecated
+ public static Path[] getLocalCacheFiles(Configuration conf)
+ throws IOException {
+ return StringUtils.stringToPath(conf.getStrings(MRJobConfig.CACHE_LOCALFILES));
+ }
+
+ /**
+ * Get the timestamps of the archives. Used by internal
+ * DistributedCache and MapReduce code.
+ * @param conf The configuration which stored the timestamps
+ * @return a string array of timestamps
+ * @deprecated Use {@link JobContext#getArchiveTimestamps()} instead
+ */
+ @Deprecated
+ public static String[] getArchiveTimestamps(Configuration conf) {
+ return conf.getStrings(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS);
+ }
+
+
+ /**
+ * Get the timestamps of the files. Used by internal
+ * DistributedCache and MapReduce code.
+ * @param conf The configuration which stored the timestamps
+ * @return a string array of timestamps
+ * @deprecated Use {@link JobContext#getFileTimestamps()} instead
+ */
+ @Deprecated
+ public static String[] getFileTimestamps(Configuration conf) {
+ return conf.getStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS);
+ }
+
+ /**
+ * Add an archive to be localized to the conf. Intended to
+ * be used by user code.
+ * @param uri The uri of the cache to be localized
+ * @param conf Configuration to add the cache to
+ * @deprecated Use {@link Job#addCacheArchive(URI)} instead
+ */
+ @Deprecated
+ public static void addCacheArchive(URI uri, Configuration conf) {
+ String archives = conf.get(MRJobConfig.CACHE_ARCHIVES);
+ conf.set(MRJobConfig.CACHE_ARCHIVES, archives == null ? uri.toString()
+ : archives + "," + uri.toString());
+ }
+
+ /**
+ * Add a file to be localized to the conf. Intended
+ * to be used by user code.
+ * @param uri The uri of the cache to be localized
+ * @param conf Configuration to add the cache to
+ * @deprecated Use {@link Job#addCacheFile(URI)} instead
+ */
+ @Deprecated
+ public static void addCacheFile(URI uri, Configuration conf) {
+ String files = conf.get(MRJobConfig.CACHE_FILES);
+ conf.set(MRJobConfig.CACHE_FILES, files == null ? uri.toString() : files + ","
+ + uri.toString());
+ }
+
+ /**
+ * Add a file path to the current set of classpath entries. It adds the file
+ * to cache as well. Intended to be used by user code.
+ *
+ * @param file Path of the file to be added
+ * @param conf Configuration that contains the classpath setting
+ * @deprecated Use {@link Job#addFileToClassPath(Path)} instead
+ */
+ @Deprecated
+ public static void addFileToClassPath(Path file, Configuration conf)
+ throws IOException {
+ String classpath = conf.get(MRJobConfig.CLASSPATH_FILES);
+ conf.set(MRJobConfig.CLASSPATH_FILES, classpath == null ? file.toString()
+ : classpath + "," + file.toString());
+ FileSystem fs = FileSystem.get(conf);
+ URI uri = fs.makeQualified(file).toUri();
+
+ addCacheFile(uri, conf);
+ }
+
+ /**
+ * Get the file entries in classpath as an array of Path.
+ * Used by internal DistributedCache code.
+ *
+ * @param conf Configuration that contains the classpath setting
+ * @deprecated Use {@link JobContext#getFileClassPaths()} instead
+ */
+ @Deprecated
+ public static Path[] getFileClassPaths(Configuration conf) {
+ ArrayList<String> list = (ArrayList<String>)conf.getStringCollection(
+ MRJobConfig.CLASSPATH_FILES);
+ if (list.size() == 0) {
+ return null;
+ }
+ Path[] paths = new Path[list.size()];
+ for (int i = 0; i < list.size(); i++) {
+ paths[i] = new Path(list.get(i));
+ }
+ return paths;
+ }
+
+ /**
+ * Add an archive path to the current set of classpath entries. It adds the
+ * archive to cache as well. Intended to be used by user code.
+ *
+ * @param archive Path of the archive to be added
+ * @param conf Configuration that contains the classpath setting
+ * @deprecated Use {@link Job#addArchiveToClassPath(Path)} instead
+ */
+ @Deprecated
+ public static void addArchiveToClassPath(Path archive, Configuration conf)
+ throws IOException {
+ String classpath = conf.get(MRJobConfig.CLASSPATH_ARCHIVES);
+ conf.set(MRJobConfig.CLASSPATH_ARCHIVES, classpath == null ? archive
+ .toString() : classpath + "," + archive.toString());
+ FileSystem fs = FileSystem.get(conf);
+ URI uri = fs.makeQualified(archive).toUri();
+
+ addCacheArchive(uri, conf);
+ }
+
+ /**
+ * Get the archive entries in classpath as an array of Path.
+ * Used by internal DistributedCache code.
+ *
+ * @param conf Configuration that contains the classpath setting
+ * @deprecated Use {@link JobContext#getArchiveClassPaths()} instead
+ */
+ @Deprecated
+ public static Path[] getArchiveClassPaths(Configuration conf) {
+ ArrayList<String> list = (ArrayList<String>)conf.getStringCollection(
+ MRJobConfig.CLASSPATH_ARCHIVES);
+ if (list.size() == 0) {
+ return null;
+ }
+ Path[] paths = new Path[list.size()];
+ for (int i = 0; i < list.size(); i++) {
+ paths[i] = new Path(list.get(i));
+ }
+ return paths;
+ }
+
+ /**
+ * This method allows you to create symlinks in the current working directory
+ * of the task to all the cache files/archives.
+ * Intended to be used by user code.
+ * @param conf the jobconf
+ * @deprecated Use {@link Job#createSymlink()} instead
+ */
+ @Deprecated
+ public static void createSymlink(Configuration conf){
+ conf.set(MRJobConfig.CACHE_SYMLINK, "yes");
+ }
+
+ /**
+ * This method checks to see if symlinks are to be created for the
+ * localized cache files in the current working directory.
+ * Used by internal DistributedCache code.
+ * @param conf the jobconf
+ * @return true if symlinks are to be created, false otherwise
+ * @deprecated Use {@link JobContext#getSymlink()} instead
+ */
+ @Deprecated
+ public static boolean getSymlink(Configuration conf){
+ String result = conf.get(MRJobConfig.CACHE_SYMLINK);
+ if ("yes".equals(result)){
+ return true;
+ }
+ return false;
+ }
+
+ private static boolean[] parseBooleans(String[] strs) {
+ if (null == strs) {
+ return null;
+ }
+ boolean[] result = new boolean[strs.length];
+ for(int i=0; i < strs.length; ++i) {
+ result[i] = Boolean.parseBoolean(strs[i]);
+ }
+ return result;
+ }
+
+ /**
+ * Get the booleans on whether the files are public or not. Used by
+ * internal DistributedCache and MapReduce code.
+ * @param conf The configuration which stored the visibilities
+ * @return a boolean array of file visibilities
+ */
+ public static boolean[] getFileVisibilities(Configuration conf) {
+ return parseBooleans(conf.getStrings(MRJobConfig.CACHE_FILE_VISIBILITIES));
+ }
+
+ /**
+ * Get the booleans on whether the archives are public or not. Used by
+ * internal DistributedCache and MapReduce code.
+ * @param conf The configuration which stored the visibilities
+ * @return a boolean array of archive visibilities
+ */
+ public static boolean[] getArchiveVisibilities(Configuration conf) {
+ return parseBooleans(conf.getStrings(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES));
+ }
+
+ /**
+ * This method checks if there is a conflict in the fragment names
+ * of the uris. Also makes sure that each uri has a fragment. It
+ * is only to be called if you want to create symlinks for
+ * the various archives and files. May be used by user code.
+ * @param uriFiles The array of file URIs
+ * @param uriArchives The array of archive URIs
+ * @return false if any URI is missing a fragment or two fragments
+ * collide (case-insensitively), true otherwise
+ */
+ public static boolean checkURIs(URI[] uriFiles, URI[] uriArchives) {
+ if ((uriFiles == null) && (uriArchives == null)) {
+ return true;
+ }
+ // check if fragment is null for any uri
+ // also check if there are any conflicts in fragment names
+ Set<String> fragments = new HashSet<String>();
+
+ // iterate over file uris
+ if (uriFiles != null) {
+ for (int i = 0; i < uriFiles.length; i++) {
+ String fragment = uriFiles[i].getFragment();
+ if (fragment == null) {
+ return false;
+ }
+ String lowerCaseFragment = fragment.toLowerCase();
+ if (fragments.contains(lowerCaseFragment)) {
+ return false;
+ }
+ fragments.add(lowerCaseFragment);
+ }
+ }
+
+ // iterate over archive uris
+ if (uriArchives != null) {
+ for (int i = 0; i < uriArchives.length; i++) {
+ String fragment = uriArchives[i].getFragment();
+ if (fragment == null) {
+ return false;
+ }
+ String lowerCaseFragment = fragment.toLowerCase();
+ if (fragments.contains(lowerCaseFragment)) {
+ return false;
+ }
+ fragments.add(lowerCaseFragment);
+ }
+ }
+ return true;
+ }
+
+}
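
As a usage sketch of the (deprecated) static API above — mirroring the class javadoc, with hypothetical /myapp paths and #fragments, and a made-up class name CacheSetupSketch — a client might populate the cache like this:

// Sketch of client-side cache setup via the deprecated static methods above.
// The /myapp paths and #fragments are hypothetical.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;

public class CacheSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), conf);
    DistributedCache.addCacheArchive(new URI("/myapp/map.zip#map.zip"), conf);
    DistributedCache.createSymlink(conf); // symlink cache entries into the task cwd
    // When symlinking, every URI must carry a unique fragment; checkURIs
    // verifies this and returns true here since both fragments are distinct.
    boolean ok = DistributedCache.checkURIs(
        DistributedCache.getCacheFiles(conf),
        DistributedCache.getCacheArchives(conf));
    System.out.println("fragment check: " + ok);
  }
}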
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/package-info.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/AvroArrayUtils.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/AvroArrayUtils.java
new file mode 100644
index 0000000..70c47d0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/AvroArrayUtils.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+
+public class AvroArrayUtils {
+
+ private static final Schema ARRAY_INT
+ = Schema.createArray(Schema.create(Schema.Type.INT));
+
+ public static List<Integer> NULL_PROGRESS_SPLITS_ARRAY
+ = new GenericData.Array<Integer>(0, ARRAY_INT);
+
+ public static List<Integer>
+ toAvro(int values[]) {
+ List<Integer> result = new ArrayList<Integer>(values.length);
+
+ for (int i = 0; i < values.length; ++i) {
+ result.add(values[i]);
+ }
+
+ return result;
+ }
+
+ public static int[] fromAvro(List<Integer> avro) {
+ int[] result = new int[avro.size()];
+
+ int i = 0;
+
+ for (Iterator<Integer> iter = avro.iterator(); iter.hasNext(); ++i) {
+ result[i] = iter.next();
+ }
+
+ return result;
+ }
+}
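
A quick round-trip of the two converters above (the input values and the class name AvroArrayRoundTrip are arbitrary examples):

// Round-trip sketch: int[] -> Avro-friendly List<Integer> -> int[].
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.mapreduce.jobhistory.AvroArrayUtils;

public class AvroArrayRoundTrip {
  public static void main(String[] args) {
    int[] splits = {10, 20, 30};   // arbitrary progress-split values
    List<Integer> avro = AvroArrayUtils.toAvro(splits);
    int[] back = AvroArrayUtils.fromAvro(avro);
    System.out.println(Arrays.equals(splits, back)); // true
  }
}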
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
new file mode 100644
index 0000000..2d5222f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.avro.Schema;
+import org.apache.avro.io.DatumWriter;
+import org.apache.avro.io.Encoder;
+import org.apache.avro.io.JsonEncoder;
+import org.apache.avro.specific.SpecificDatumWriter;
+import org.apache.avro.util.Utf8;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
+
+/**
+ * EventWriter is a utility class used to write events to the underlying
+ * stream. Typically, one event writer (which translates to one stream)
+ * is created per job.
+ *
+ */
+class EventWriter {
+ static final String VERSION = "Avro-Json";
+
+ private FSDataOutputStream out;
+ private DatumWriter<Event> writer =
+ new SpecificDatumWriter<Event>(Event.class);
+ private Encoder encoder;
+ private static final Log LOG = LogFactory.getLog(EventWriter.class);
+
+ EventWriter(FSDataOutputStream out) throws IOException {
+ this.out = out;
+ out.writeBytes(VERSION);
+ out.writeBytes("\n");
+ out.writeBytes(Event.SCHEMA$.toString());
+ out.writeBytes("\n");
+ this.encoder = new JsonEncoder(Event.SCHEMA$, out);
+ }
+
+ synchronized void write(HistoryEvent event) throws IOException {
+ Event wrapper = new Event();
+ wrapper.type = event.getEventType();
+ wrapper.event = event.getDatum();
+ writer.write(wrapper, encoder);
+ encoder.flush();
+ out.writeBytes("\n");
+ }
+
+ void flush() throws IOException {
+ encoder.flush();
+ out.flush();
+ }
+
+ void close() throws IOException {
+ try {
+ encoder.flush();
+ out.close();
+ out = null;
+ } finally {
+ IOUtils.cleanup(LOG, out);
+ }
+ }
+
+ private static final Schema GROUPS =
+ Schema.createArray(JhCounterGroup.SCHEMA$);
+
+ private static final Schema COUNTERS =
+ Schema.createArray(JhCounter.SCHEMA$);
+
+ static JhCounters toAvro(Counters counters) {
+ return toAvro(counters, "COUNTERS");
+ }
+ static JhCounters toAvro(Counters counters, String name) {
+ JhCounters result = new JhCounters();
+ result.name = new Utf8(name);
+ result.groups = new ArrayList<JhCounterGroup>(0);
+ if (counters == null) return result;
+ for (CounterGroup group : counters) {
+ JhCounterGroup g = new JhCounterGroup();
+ g.name = new Utf8(group.getName());
+ g.displayName = new Utf8(group.getDisplayName());
+ g.counts = new ArrayList<JhCounter>(group.size());
+ for (Counter counter : group) {
+ JhCounter c = new JhCounter();
+ c.name = new Utf8(counter.getName());
+ c.displayName = new Utf8(counter.getDisplayName());
+ c.value = counter.getValue();
+ g.counts.add(c);
+ }
+ result.groups.add(g);
+ }
+ return result;
+ }
+
+}
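
EventWriter and its toAvro helpers are package-private, so a sketch exercising them must live in org.apache.hadoop.mapreduce.jobhistory. The sketch below (class name EventWriterSketch and the output path are hypothetical) only illustrates the on-disk layout the class produces: a version line, the Avro event schema, then one JSON-encoded event per line.

// Sketch (same package, since EventWriter is package-private): open a local
// stream and let EventWriter emit the "Avro-Json" header and event schema.
package org.apache.hadoop.mapreduce.jobhistory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EventWriterSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    FSDataOutputStream out = fs.create(new Path("/tmp/history-sketch"));
    EventWriter writer = new EventWriter(out); // writes version + schema lines
    // writer.write(event);  // each HistoryEvent becomes one JSON line
    writer.close();
  }
}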
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEvent.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
new file mode 100644
index 0000000..82a948c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
@@ -0,0 +1,796 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import java.io.IOException;
+import java.text.DecimalFormat;
+import java.text.Format;
+import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.TaskStatus;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
+import org.apache.hadoop.mapreduce.util.HostUtil;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * HistoryViewer is used to parse and view the JobHistory files.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class HistoryViewer {
+ private static SimpleDateFormat dateFormat =
+ new SimpleDateFormat("d-MMM-yyyy HH:mm:ss");
+ private FileSystem fs;
+ private JobInfo job;
+ private String jobId;
+ private boolean printAll;
+
+ /**
+ * Constructs the HistoryViewer object.
+ * @param historyFile The fully qualified Path of the History File
+ * @param conf The Configuration
+ * @param printAll Whether to print all task statuses, or only those of
+ * killed/failed tasks
+ * @throws IOException
+ */
+ public HistoryViewer(String historyFile,
+ Configuration conf,
+ boolean printAll) throws IOException {
+ this.printAll = printAll;
+ String errorMsg = "Unable to initialize History Viewer";
+ try {
+ Path jobFile = new Path(historyFile);
+ fs = jobFile.getFileSystem(conf);
+ String[] jobDetails =
+ jobFile.getName().split("_");
+ if (jobDetails.length < 2) {
+ // NOT a valid name
+ System.err.println("Ignore unrecognized file: " + jobFile.getName());
+ throw new IOException(errorMsg);
+ }
+ JobHistoryParser parser = new JobHistoryParser(fs, jobFile);
+ job = parser.parse();
+ jobId = job.getJobId().toString();
+ } catch(Exception e) {
+ throw new IOException(errorMsg, e);
+ }
+ }
+
+ /**
+ * Print the job/task/attempt summary information
+ * @throws IOException
+ */
+ public void print() throws IOException{
+ printJobDetails();
+ printTaskSummary();
+ printJobAnalysis();
+ printTasks(TaskType.JOB_SETUP, TaskStatus.State.FAILED.toString());
+ printTasks(TaskType.JOB_SETUP, TaskStatus.State.KILLED.toString());
+ printTasks(TaskType.MAP, TaskStatus.State.FAILED.toString());
+ printTasks(TaskType.MAP, TaskStatus.State.KILLED.toString());
+ printTasks(TaskType.REDUCE, TaskStatus.State.FAILED.toString());
+ printTasks(TaskType.REDUCE, TaskStatus.State.KILLED.toString());
+ printTasks(TaskType.JOB_CLEANUP, TaskStatus.State.FAILED.toString());
+ printTasks(TaskType.JOB_CLEANUP,
+ JobStatus.getJobRunState(JobStatus.KILLED));
+ if (printAll) {
+ printTasks(TaskType.JOB_SETUP, TaskStatus.State.SUCCEEDED.toString());
+ printTasks(TaskType.MAP, TaskStatus.State.SUCCEEDED.toString());
+ printTasks(TaskType.REDUCE, TaskStatus.State.SUCCEEDED.toString());
+ printTasks(TaskType.JOB_CLEANUP, TaskStatus.State.SUCCEEDED.toString());
+ printAllTaskAttempts(TaskType.JOB_SETUP);
+ printAllTaskAttempts(TaskType.MAP);
+ printAllTaskAttempts(TaskType.REDUCE);
+ printAllTaskAttempts(TaskType.JOB_CLEANUP);
+ }
+
+ FilteredJob filter = new FilteredJob(job,
+ TaskStatus.State.FAILED.toString());
+ printFailedAttempts(filter);
+
+ filter = new FilteredJob(job,
+ TaskStatus.State.KILLED.toString());
+ printFailedAttempts(filter);
+ }
+
+ private void printJobDetails() {
+ StringBuffer jobDetails = new StringBuffer();
+ jobDetails.append("\nHadoop job: " ).append(job.getJobId());
+ jobDetails.append("\n=====================================");
+ jobDetails.append("\nUser: ").append(job.getUsername());
+ jobDetails.append("\nJobName: ").append(job.getJobname());
+ jobDetails.append("\nJobConf: ").append(job.getJobConfPath());
+ jobDetails.append("\nSubmitted At: ").append(StringUtils.
+ getFormattedTimeWithDiff(dateFormat,
+ job.getSubmitTime(), 0));
+ jobDetails.append("\nLaunched At: ").append(StringUtils.
+ getFormattedTimeWithDiff(dateFormat,
+ job.getLaunchTime(),
+ job.getSubmitTime()));
+ jobDetails.append("\nFinished At: ").append(StringUtils.
+ getFormattedTimeWithDiff(dateFormat,
+ job.getFinishTime(),
+ job.getLaunchTime()));
+ jobDetails.append("\nStatus: ").append(((job.getJobStatus() == null) ?
+ "Incomplete" :job.getJobStatus()));
+ printCounters(jobDetails, job.getTotalCounters(), job.getMapCounters(),
+ job.getReduceCounters());
+ jobDetails.append("\n");
+ jobDetails.append("\n=====================================");
+ System.out.println(jobDetails.toString());
+ }
+
+ private void printCounters(StringBuffer buff, Counters totalCounters,
+ Counters mapCounters, Counters reduceCounters) {
+ // Killed jobs might not have counters
+ if (totalCounters == null) {
+ return;
+ }
+ buff.append("\nCounters: \n\n");
+ buff.append(String.format("|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s|",
+ "Group Name",
+ "Counter name",
+ "Map Value",
+ "Reduce Value",
+ "Total Value"));
+ buff.append("\n------------------------------------------"+
+ "---------------------------------------------");
+ for (String groupName : totalCounters.getGroupNames()) {
+ CounterGroup totalGroup = totalCounters.getGroup(groupName);
+ CounterGroup mapGroup = mapCounters.getGroup(groupName);
+ CounterGroup reduceGroup = reduceCounters.getGroup(groupName);
+
+ Format decimal = new DecimalFormat();
+ Iterator<org.apache.hadoop.mapreduce.Counter> ctrItr =
+ totalGroup.iterator();
+ while(ctrItr.hasNext()) {
+ org.apache.hadoop.mapreduce.Counter counter = ctrItr.next();
+ String name = counter.getName();
+ String mapValue =
+ decimal.format(mapGroup.findCounter(name).getValue());
+ String reduceValue =
+ decimal.format(reduceGroup.findCounter(name).getValue());
+ String totalValue =
+ decimal.format(counter.getValue());
+
+ buff.append(
+ String.format("\n|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s",
+ totalGroup.getDisplayName(),
+ counter.getDisplayName(),
+ mapValue, reduceValue, totalValue));
+ }
+ }
+ }
+
+ private void printAllTaskAttempts(TaskType taskType) {
+ Map<TaskID, TaskInfo> tasks = job.getAllTasks();
+ StringBuffer taskList = new StringBuffer();
+ taskList.append("\n").append(taskType);
+ taskList.append(" task list for ").append(job.getJobId());
+ taskList.append("\nTaskId\t\tStartTime");
+ if (TaskType.REDUCE.equals(taskType)) {
+ taskList.append("\tShuffleFinished\tSortFinished");
+ }
+ taskList.append("\tFinishTime\tHostName\tError\tTaskLogs");
+ taskList.append("\n====================================================");
+ System.out.println(taskList.toString());
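+ // Emit one row per attempt of the requested task type, reusing the
+ // same buffer for each row.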
+ for (JobHistoryParser.TaskInfo task : tasks.values()) {
+ for (JobHistoryParser.TaskAttemptInfo attempt :
+ task.getAllTaskAttempts().values()) {
+ if (taskType.equals(task.getTaskType())){
+ taskList.setLength(0);
+ taskList.append(attempt.getAttemptId()).append("\t");
+ taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
+ attempt.getStartTime(), 0)).append("\t");
+ if (TaskType.REDUCE.equals(taskType)) {
+ taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
+ attempt.getShuffleFinishTime(),
+ attempt.getStartTime()));
+ taskList.append("\t");
+ taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
+ attempt.getSortFinishTime(),
+ attempt.getShuffleFinishTime()));
+ taskList.append("\t");
+ }
+ taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
+ attempt.getFinishTime(),
+ attempt.getStartTime()));
+ taskList.append("\t");
+ taskList.append(attempt.getHostname()).append("\t");
+ taskList.append(attempt.getError()).append("\t");
+ String taskLogsUrl = getTaskLogsUrl(attempt);
+ taskList.append(taskLogsUrl != null ? taskLogsUrl : "n/a");
+ System.out.println(taskList.toString());
+ }
+ }
+ }
+ }
+
+ private void printTaskSummary() {
+ SummarizedJob ts = new SummarizedJob(job);
+ StringBuffer taskSummary = new StringBuffer();
+ taskSummary.append("\nTask Summary");
+ taskSummary.append("\n============================");
+ taskSummary.append("\nKind\tTotal\t");
+ taskSummary.append("Successful\tFailed\tKilled\tStartTime\tFinishTime");
+ taskSummary.append("\n");
+ taskSummary.append("\nSetup\t").append(ts.totalSetups);
+ taskSummary.append("\t").append(ts.numFinishedSetups);
+ taskSummary.append("\t\t").append(ts.numFailedSetups);
+ taskSummary.append("\t").append(ts.numKilledSetups);
+ taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, ts.setupStarted, 0));
+ taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, ts.setupFinished, ts.setupStarted));
+ taskSummary.append("\nMap\t").append(ts.totalMaps);
+ taskSummary.append("\t").append(job.getFinishedMaps());
+ taskSummary.append("\t\t").append(ts.numFailedMaps);
+ taskSummary.append("\t").append(ts.numKilledMaps);
+ taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, ts.mapStarted, 0));
+ taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, ts.mapFinished, ts.mapStarted));
+ taskSummary.append("\nReduce\t").append(ts.totalReduces);
+ taskSummary.append("\t").append(job.getFinishedReduces());
+ taskSummary.append("\t\t").append(ts.numFailedReduces);
+ taskSummary.append("\t").append(ts.numKilledReduces);
+ taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, ts.reduceStarted, 0));
+ taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, ts.reduceFinished, ts.reduceStarted));
+ taskSummary.append("\nCleanup\t").append(ts.totalCleanups);
+ taskSummary.append("\t").append(ts.numFinishedCleanups);
+ taskSummary.append("\t\t").append(ts.numFailedCleanups);
+ taskSummary.append("\t").append(ts.numKilledCleanups);
+ taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, ts.cleanupStarted, 0));
+ taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, ts.cleanupFinished,
+ ts.cleanupStarted));
+ taskSummary.append("\n============================\n");
+ System.out.println(taskSummary.toString());
+ }
+
+ private void printJobAnalysis() {
+ if (!job.getJobStatus().equals
+ (JobStatus.getJobRunState(JobStatus.SUCCEEDED))) {
+ System.out.println("No Analysis available as job did not finish");
+ return;
+ }
+
+ AnalyzedJob avg = new AnalyzedJob(job);
+
+ System.out.println("\nAnalysis");
+ System.out.println("=========");
+ printAnalysis(avg.getMapTasks(), cMap, "map", avg.getAvgMapTime(), 10);
+ printLast(avg.getMapTasks(), "map", cFinishMapRed);
+
+ if (avg.getReduceTasks().length > 0) {
+ printAnalysis(avg.getReduceTasks(), cShuffle, "shuffle",
+ avg.getAvgShuffleTime(), 10);
+ printLast(avg.getReduceTasks(), "shuffle", cFinishShuffle);
+
+ printAnalysis(avg.getReduceTasks(), cReduce, "reduce",
+ avg.getAvgReduceTime(), 10);
+ printLast(avg.getReduceTasks(), "reduce", cFinishMapRed);
+ }
+ System.out.println("=========");
+ }
+
+ private void printAnalysis(JobHistoryParser.TaskAttemptInfo [] tasks,
+ Comparator<JobHistoryParser.TaskAttemptInfo> cmp,
+ String taskType,
+ long avg,
+ int showTasks) {
+ Arrays.sort(tasks, cmp);
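+ // The comparator sorts in descending order of elapsed time, so the
+ // last element is the fastest (best performing) attempt.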
+ JobHistoryParser.TaskAttemptInfo min = tasks[tasks.length-1];
+ StringBuffer details = new StringBuffer();
+ details.append("\nTime taken by best performing ");
+ details.append(taskType).append(" task ");
+ details.append(min.getAttemptId().getTaskID().toString()).append(": ");
+ if ("map".equals(taskType)) {
+ details.append(StringUtils.formatTimeDiff(
+ min.getFinishTime(),
+ min.getStartTime()));
+ } else if ("shuffle".equals(taskType)) {
+ details.append(StringUtils.formatTimeDiff(
+ min.getShuffleFinishTime(),
+ min.getStartTime()));
+ } else {
+ details.append(StringUtils.formatTimeDiff(
+ min.getFinishTime(),
+ min.getShuffleFinishTime()));
+ }
+ details.append("\nAverage time taken by ");
+ details.append(taskType).append(" tasks: ");
+ details.append(StringUtils.formatTimeDiff(avg, 0));
+ details.append("\nWorse performing ");
+ details.append(taskType).append(" tasks: ");
+ details.append("\nTaskId\t\tTimetaken");
+ System.out.println(details.toString());
+ for (int i = 0; i < showTasks && i < tasks.length; i++) {
+ details.setLength(0);
+ details.append(tasks[i].getAttemptId().getTaskID()).append(" ");
+ if ("map".equals(taskType)) {
+ details.append(StringUtils.formatTimeDiff(
+ tasks[i].getFinishTime(),
+ tasks[i].getStartTime()));
+ } else if ("shuffle".equals(taskType)) {
+ details.append(StringUtils.formatTimeDiff(
+ tasks[i].getShuffleFinishTime(),
+ tasks[i].getStartTime()));
+ } else {
+ details.append(StringUtils.formatTimeDiff(
+ tasks[i].getFinishTime(),
+ tasks[i].getShuffleFinishTime()));
+ }
+ System.out.println(details.toString());
+ }
+ }
+
+ private void printLast(JobHistoryParser.TaskAttemptInfo [] tasks,
+ String taskType,
+ Comparator<JobHistoryParser.TaskAttemptInfo> cmp
+ ) {
+ Arrays.sort(tasks, cmp);
+ JobHistoryParser.TaskAttemptInfo last = tasks[0];
+ StringBuffer lastBuf = new StringBuffer();
+ lastBuf.append("The last ").append(taskType);
+ lastBuf.append(" task ").append(last.getAttemptId().getTaskID());
+ long finishTime;
+ if ("shuffle".equals(taskType)) {
+ finishTime = last.getShuffleFinishTime();
+ } else {
+ finishTime = last.getFinishTime();
+ }
+ lastBuf.append(" finished at (relative to the Job launch time): ");
+ lastBuf.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
+ finishTime, job.getLaunchTime()));
+ System.out.println(lastBuf.toString());
+ }
+
+ private void printTasks(TaskType taskType, String status) {
+ Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
+ StringBuffer header = new StringBuffer();
+ header.append("\n").append(status).append(" ");
+ header.append(taskType).append(" task list for ").append(jobId);
+ header.append("\nTaskId\t\tStartTime\tFinishTime\tError");
+ if (TaskType.MAP.equals(taskType)) {
+ header.append("\tInputSplits");
+ }
+ header.append("\n====================================================");
+ StringBuffer taskList = new StringBuffer();
+ for (JobHistoryParser.TaskInfo task : tasks.values()) {
+ if (taskType.equals(task.getTaskType()) &&
+ (status.equals(task.getTaskStatus())
+ || status.equalsIgnoreCase("ALL"))) {
+ taskList.setLength(0);
+ taskList.append(task.getTaskId());
+ taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, task.getStartTime(), 0));
+ taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+ dateFormat, task.getFinishTime(),
+ task.getStartTime()));
+ taskList.append("\t").append(task.getError());
+ if (TaskType.MAP.equals(taskType)) {
+ taskList.append("\t").append(task.getSplitLocations());
+ }
+ // taskList always has content at this point; print the header
+ // before each matching row.
+ System.out.println(header.toString());
+ System.out.println(taskList.toString());
+ }
+ }
+ }
+
+ private void printFailedAttempts(FilteredJob filteredJob) {
+ Map<String, Set<TaskID>> badNodes = filteredJob.getFilteredMap();
+ StringBuffer attempts = new StringBuffer();
+ if (badNodes.size() > 0) {
+ attempts.append("\n").append(filteredJob.getFilter());
+ attempts.append(" task attempts by nodes");
+ attempts.append("\nHostname\tFailedTasks");
+ attempts.append("\n===============================");
+ System.out.println(attempts.toString());
+ for (Map.Entry<String,
+ Set<TaskID>> entry : badNodes.entrySet()) {
+ String node = entry.getKey();
+ Set<TaskID> failedTasks = entry.getValue();
+ attempts.setLength(0);
+ attempts.append(node).append("\t");
+ for (TaskID t : failedTasks) {
+ attempts.append(t).append(", ");
+ }
+ System.out.println(attempts.toString());
+ }
+ }
+ }
+
+ /**
+ * Return the task-logs URL of a particular TaskAttempt.
+ *
+ * @param attempt the task attempt whose logs are wanted
+ * @return the task-logs URL, or null if the http-port, tracker-name or
+ * task-attempt-id is unavailable.
+ */
+ public static String getTaskLogsUrl(
+ JobHistoryParser.TaskAttemptInfo attempt) {
+ if (attempt.getHttpPort() == -1
+ || attempt.getTrackerName().equals("")
+ || attempt.getAttemptId() == null) {
+ return null;
+ }
+
+ String taskTrackerName =
+ HostUtil.convertTrackerNameToHostName(
+ attempt.getTrackerName());
+ return HostUtil.getTaskLogUrl(taskTrackerName,
+ Integer.toString(attempt.getHttpPort()),
+ attempt.getAttemptId().toString());
+ }
+
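+ // The comparators below sort attempts in descending order of the measured
+ // duration (or finish time): index 0 is the slowest/latest attempt and the
+ // last index is the fastest/earliest.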
+ private Comparator<JobHistoryParser.TaskAttemptInfo> cMap =
+ new Comparator<JobHistoryParser.TaskAttemptInfo>() {
+ public int compare(JobHistoryParser.TaskAttemptInfo t1,
+ JobHistoryParser.TaskAttemptInfo t2) {
+ long l1 = t1.getFinishTime() - t1.getStartTime();
+ long l2 = t2.getFinishTime() - t2.getStartTime();
+ return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+ }
+ };
+
+ private Comparator<JobHistoryParser.TaskAttemptInfo> cShuffle =
+ new Comparator<JobHistoryParser.TaskAttemptInfo>() {
+ public int compare(JobHistoryParser.TaskAttemptInfo t1,
+ JobHistoryParser.TaskAttemptInfo t2) {
+ long l1 = t1.getShuffleFinishTime() - t1.getStartTime();
+ long l2 = t2.getShuffleFinishTime() - t2.getStartTime();
+ return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+ }
+ };
+
+ private Comparator<JobHistoryParser.TaskAttemptInfo> cFinishShuffle =
+ new Comparator<JobHistoryParser.TaskAttemptInfo>() {
+ public int compare(JobHistoryParser.TaskAttemptInfo t1,
+ JobHistoryParser.TaskAttemptInfo t2) {
+ long l1 = t1.getShuffleFinishTime();
+ long l2 = t2.getShuffleFinishTime();
+ return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+ }
+ };
+
+ private Comparator<JobHistoryParser.TaskAttemptInfo> cFinishMapRed =
+ new Comparator<JobHistoryParser.TaskAttemptInfo>() {
+ public int compare(JobHistoryParser.TaskAttemptInfo t1,
+ JobHistoryParser.TaskAttemptInfo t2) {
+ long l1 = t1.getFinishTime();
+ long l2 = t2.getFinishTime();
+ return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+ }
+ };
+
+ private Comparator<JobHistoryParser.TaskAttemptInfo> cReduce =
+ new Comparator<JobHistoryParser.TaskAttemptInfo>() {
+ public int compare(JobHistoryParser.TaskAttemptInfo t1,
+ JobHistoryParser.TaskAttemptInfo t2) {
+ long l1 = t1.getFinishTime() -
+ t1.getShuffleFinishTime();
+ long l2 = t2.getFinishTime() -
+ t2.getShuffleFinishTime();
+ return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+ }
+ };
+
+ /**
+ * Utility class used to summarize the job.
+ * Used by HistoryViewer and the JobHistory UI.
+ *
+ */
+ public static class SummarizedJob {
+ Map<TaskID, JobHistoryParser.TaskInfo> tasks;
+ int totalMaps = 0;
+ int totalReduces = 0;
+ int totalCleanups = 0;
+ int totalSetups = 0;
+ int numFailedMaps = 0;
+ int numKilledMaps = 0;
+ int numFailedReduces = 0;
+ int numKilledReduces = 0;
+ int numFinishedCleanups = 0;
+ int numFailedCleanups = 0;
+ int numKilledCleanups = 0;
+ int numFinishedSetups = 0;
+ int numFailedSetups = 0;
+ int numKilledSetups = 0;
+ long mapStarted = 0;
+ long mapFinished = 0;
+ long reduceStarted = 0;
+ long reduceFinished = 0;
+ long cleanupStarted = 0;
+ long cleanupFinished = 0;
+ long setupStarted = 0;
+ long setupFinished = 0;
+
+ /** Get total maps */
+ public int getTotalMaps() { return totalMaps; }
+ /** Get total reduces */
+ public int getTotalReduces() { return totalReduces; }
+ /** Get number of clean up tasks */
+ public int getTotalCleanups() { return totalCleanups; }
+ /** Get number of set up tasks */
+ public int getTotalSetups() { return totalSetups; }
+ /** Get number of failed maps */
+ public int getNumFailedMaps() { return numFailedMaps; }
+ /** Get number of killed maps */
+ public int getNumKilledMaps() { return numKilledMaps; }
+ /** Get number of failed reduces */
+ public int getNumFailedReduces() { return numFailedReduces; }
+ /** Get number of killed reduces */
+ public int getNumKilledReduces() { return numKilledReduces; }
+ /** Get number of cleanup tasks that finished */
+ public int getNumFinishedCleanups() { return numFinishedCleanups; }
+ /** Get number of failed cleanup tasks */
+ public int getNumFailedCleanups() { return numFailedCleanups; }
+ /** Get number of killed cleanup tasks */
+ public int getNumKilledCleanups() { return numKilledCleanups; }
+ /** Get number of finished set up tasks */
+ public int getNumFinishedSetups() { return numFinishedSetups; }
+ /** Get number of failed set up tasks */
+ public int getNumFailedSetups() { return numFailedSetups; }
+ /** Get number of killed set up tasks */
+ public int getNumKilledSetups() { return numKilledSetups; }
+ /** Get the start time of the first map attempt */
+ public long getMapStarted() { return mapStarted; }
+ /** Get the finish time of the last map attempt */
+ public long getMapFinished() { return mapFinished; }
+ /** Get the start time of the first reduce attempt */
+ public long getReduceStarted() { return reduceStarted; }
+ /** Get the finish time of the last reduce attempt */
+ public long getReduceFinished() { return reduceFinished; }
+ /** Get the start time of the first cleanup attempt */
+ public long getCleanupStarted() { return cleanupStarted; }
+ /** Get the finish time of the last cleanup attempt */
+ public long getCleanupFinished() { return cleanupFinished; }
+ /** Get the start time of the first setup attempt */
+ public long getSetupStarted() { return setupStarted; }
+ /** Get the finish time of the last setup attempt */
+ public long getSetupFinished() { return setupFinished; }
+
+ /** Create summary information for the parsed job */
+ public SummarizedJob(JobInfo job) {
+ tasks = job.getAllTasks();
+
+ for (JobHistoryParser.TaskInfo task : tasks.values()) {
+ Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
+ task.getAllTaskAttempts();
+ for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
+ long startTime = attempt.getStartTime();
+ long finishTime = attempt.getFinishTime();
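+ // Track the earliest start and latest finish seen for each task type,
+ // and count attempts by their final status.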
+ if (attempt.getTaskType().equals(TaskType.MAP)) {
+ if (mapStarted == 0 || mapStarted > startTime) {
+ mapStarted = startTime;
+ }
+ if (mapFinished < finishTime) {
+ mapFinished = finishTime;
+ }
+ totalMaps++;
+ if (attempt.getTaskStatus().equals
+ (TaskStatus.State.FAILED.toString())) {
+ numFailedMaps++;
+ } else if (attempt.getTaskStatus().equals
+ (TaskStatus.State.KILLED.toString())) {
+ numKilledMaps++;
+ }
+ } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
+ if (reduceStarted == 0 || reduceStarted > startTime) {
+ reduceStarted = startTime;
+ }
+ if (reduceFinished < finishTime) {
+ reduceFinished = finishTime;
+ }
+ totalReduces++;
+ if (attempt.getTaskStatus().equals
+ (TaskStatus.State.FAILED.toString())) {
+ numFailedReduces++;
+ } else if (attempt.getTaskStatus().equals
+ (TaskStatus.State.KILLED.toString())) {
+ numKilledReduces++;
+ }
+ } else if (attempt.getTaskType().equals(TaskType.JOB_CLEANUP)) {
+ if (cleanupStarted == 0 || cleanupStarted > startTime) {
+ cleanupStarted = startTime;
+ }
+ if (cleanupFinished < finishTime) {
+ cleanupFinished = finishTime;
+ }
+ totalCleanups++;
+ if (attempt.getTaskStatus().equals
+ (TaskStatus.State.SUCCEEDED.toString())) {
+ numFinishedCleanups++;
+ } else if (attempt.getTaskStatus().equals
+ (TaskStatus.State.FAILED.toString())) {
+ numFailedCleanups++;
+ } else if (attempt.getTaskStatus().equals
+ (TaskStatus.State.KILLED.toString())) {
+ numKilledCleanups++;
+ }
+ } else if (attempt.getTaskType().equals(TaskType.JOB_SETUP)) {
+ if (setupStarted == 0 || setupStarted > startTime) {
+ setupStarted = startTime;
+ }
+ if (setupFinished < finishTime) {
+ setupFinished = finishTime;
+ }
+ totalSetups++;
+ if (attempt.getTaskStatus().equals
+ (TaskStatus.State.SUCCEEDED.toString())) {
+ numFinishedSetups++;
+ } else if (attempt.getTaskStatus().equals
+ (TaskStatus.State.FAILED.toString())) {
+ numFailedSetups++;
+ } else if (attempt.getTaskStatus().equals
+ (TaskStatus.State.KILLED.toString())) {
+ numKilledSetups++;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Utility class used while analyzing the job.
+ * Used by HistoryViewer and the JobHistory UI.
+ */
+ public static class AnalyzedJob {
+ private long avgMapTime;
+ private long avgReduceTime;
+ private long avgShuffleTime;
+
+ private JobHistoryParser.TaskAttemptInfo [] mapTasks;
+ private JobHistoryParser.TaskAttemptInfo [] reduceTasks;
+
+ /** Get the average map time */
+ public long getAvgMapTime() { return avgMapTime; }
+ /** Get the average reduce time */
+ public long getAvgReduceTime() { return avgReduceTime; }
+ /** Get the average shuffle time */
+ public long getAvgShuffleTime() { return avgShuffleTime; }
+ /** Get the map tasks list */
+ public JobHistoryParser.TaskAttemptInfo [] getMapTasks() {
+ return mapTasks;
+ }
+ /** Get the reduce tasks list */
+ public JobHistoryParser.TaskAttemptInfo [] getReduceTasks() {
+ return reduceTasks;
+ }
+ /** Generate analysis information for the parsed job */
+ public AnalyzedJob(JobInfo job) {
+ Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
+ int finishedMaps = (int) job.getFinishedMaps();
+ int finishedReduces = (int) job.getFinishedReduces();
+ mapTasks =
+ new JobHistoryParser.TaskAttemptInfo[finishedMaps];
+ reduceTasks =
+ new JobHistoryParser.TaskAttemptInfo[finishedReduces];
+ int mapIndex = 0, reduceIndex = 0;
+ avgMapTime = 0;
+ avgReduceTime = 0;
+ avgShuffleTime = 0;
+
+ for (JobHistoryParser.TaskInfo task : tasks.values()) {
+ Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
+ task.getAllTaskAttempts();
+ for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
+ if (attempt.getTaskStatus().
+ equals(TaskStatus.State.SUCCEEDED.toString())) {
+ long attemptRunTime = attempt.getFinishTime() -
+ attempt.getStartTime();
+ if (attempt.getTaskType().equals(TaskType.MAP)) {
+ mapTasks[mapIndex++] = attempt;
+ avgMapTime += attemptRunTime;
+ } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
+ reduceTasks[reduceIndex++] = attempt;
+ avgShuffleTime += (attempt.getShuffleFinishTime() -
+ attempt.getStartTime());
+ avgReduceTime += (attempt.getFinishTime() -
+ attempt.getShuffleFinishTime());
+ }
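+ // Only the first successful attempt of each task is counted.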
+ break;
+ }
+ }
+ }
+ if (finishedMaps > 0) {
+ avgMapTime /= finishedMaps;
+ }
+ if (finishedReduces > 0) {
+ avgReduceTime /= finishedReduces;
+ avgShuffleTime /= finishedReduces;
+ }
+ }
+ }
+
+ /**
+ * Utility to filter out task attempts based on the task status
+ *
+ */
+ public static class FilteredJob {
+
+ private Map<String, Set<TaskID>> badNodesToFilteredTasks =
+ new HashMap<String, Set<TaskID>>();
+
+ private String filter;
+
+ /** Get the map of the filtered tasks */
+ public Map<String, Set<TaskID>> getFilteredMap() {
+ return badNodesToFilteredTasks;
+ }
+
+ /** Get the current filter */
+ public String getFilter() { return filter; }
+
+ /** Apply the filter (status) on the parsed job and generate summary */
+ public FilteredJob(JobInfo job, String status) {
+
+ filter = status;
+
+ Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
+
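+ // Group the IDs of tasks whose attempts match the filter status by the
+ // host on which those attempts ran.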
+ for (JobHistoryParser.TaskInfo task : tasks.values()) {
+ Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
+ task.getAllTaskAttempts();
+ for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
+ if (attempt.getTaskStatus().equals(status)) {
+ String hostname = attempt.getHostname();
+ TaskID id = attempt.getAttemptId().getTaskID();
+
+ Set<TaskID> set = badNodesToFilteredTasks.get(hostname);
+
+ if (set == null) {
+ set = new TreeSet<TaskID>();
+ set.add(id);
+ badNodesToFilteredTasks.put(hostname, set);
+ } else {
+ set.add(id);
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobInfoChangeEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInfoChangeEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobInfoChangeEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInfoChangeEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobInitedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInitedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobInitedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInitedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobPriorityChangeEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobPriorityChangeEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobPriorityChangeEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobPriorityChangeEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobStatusChangedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobStatusChangedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobStatusChangedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobStatusChangedEvent.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSubmittedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSubmittedEvent.java
new file mode 100644
index 0000000..39af924
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSubmittedEvent.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.security.authorize.AccessControlList;
+
+import org.apache.avro.util.Utf8;
+
+/**
+ * Event to record the submission of a job
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class JobSubmittedEvent implements HistoryEvent {
+ private JobSubmitted datum = new JobSubmitted();
+
+ /**
+ * Create an event to record job submission
+ * @param id The job Id of the job
+ * @param jobName Name of the job
+ * @param userName Name of the user who submitted the job
+ * @param submitTime Time of submission
+ * @param jobConfPath Path of the Job Configuration file
+ * @param jobACLs The configured acls for the job.
+ * @param jobQueueName The queue to which this job was submitted
+ */
+ public JobSubmittedEvent(JobID id, String jobName, String userName,
+ long submitTime, String jobConfPath,
+ Map<JobACL, AccessControlList> jobACLs, String jobQueueName) {
+ datum.jobid = new Utf8(id.toString());
+ datum.jobName = new Utf8(jobName);
+ datum.userName = new Utf8(userName);
+ datum.submitTime = submitTime;
+ datum.jobConfPath = new Utf8(jobConfPath);
+ Map<CharSequence, CharSequence> jobAcls = new HashMap<CharSequence, CharSequence>();
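+ // Copy the typed ACL map into Utf8 keys/values for the Avro record.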
+ for (Entry<JobACL, AccessControlList> entry : jobACLs.entrySet()) {
+ jobAcls.put(new Utf8(entry.getKey().getAclName()), new Utf8(
+ entry.getValue().getAclString()));
+ }
+ datum.acls = jobAcls;
+ if (jobQueueName != null) {
+ datum.jobQueueName = new Utf8(jobQueueName);
+ }
+ }
+
+ JobSubmittedEvent() {}
+
+ public Object getDatum() { return datum; }
+ public void setDatum(Object datum) {
+ this.datum = (JobSubmitted)datum;
+ }
+
+ /** Get the Job Id */
+ public JobID getJobId() { return JobID.forName(datum.jobid.toString()); }
+ /** Get the Job name */
+ public String getJobName() { return datum.jobName.toString(); }
+ /** Get the Job queue name */
+ public String getJobQueueName() {
+ if (datum.jobQueueName != null) {
+ return datum.jobQueueName.toString();
+ }
+ return null;
+ }
+ /** Get the user name */
+ public String getUserName() { return datum.userName.toString(); }
+ /** Get the submit time */
+ public long getSubmitTime() { return datum.submitTime; }
+ /** Get the Path for the Job Configuration file */
+ public String getJobConfPath() { return datum.jobConfPath.toString(); }
+ /** Get the acls configured for the job */
+ public Map<JobACL, AccessControlList> getJobAcls() {
+ Map<JobACL, AccessControlList> jobAcls =
+ new HashMap<JobACL, AccessControlList>();
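+ // Rebuild the typed ACL map from the Avro record's Utf8 keys and values.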
+ for (JobACL jobACL : JobACL.values()) {
+ Utf8 jobACLsUtf8 = new Utf8(jobACL.getAclName());
+ if (datum.acls.containsKey(jobACLsUtf8)) {
+ jobAcls.put(jobACL, new AccessControlList(datum.acls.get(
+ jobACLsUtf8).toString()));
+ }
+ }
+ return jobAcls;
+ }
+ /** Get the event type */
+ public EventType getEventType() { return EventType.JOB_SUBMITTED; }
+
+}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptStartedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptStartedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptStartedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptStartedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskFailedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFailedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskFailedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFailedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskStartedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskStartedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskStartedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskStartedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskUpdatedEvent.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskUpdatedEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/TaskUpdatedEvent.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskUpdatedEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/DoubleValueSum.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/DoubleValueSum.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/DoubleValueSum.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/DoubleValueSum.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMin.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMin.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueSum.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueSum.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueSum.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueSum.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMax.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMax.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMax.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMax.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMin.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMin.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UserDefinedValueAggregatorDescriptor.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/UserDefinedValueAggregatorDescriptor.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UserDefinedValueAggregatorDescriptor.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/UserDefinedValueAggregatorDescriptor.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorCombiner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorCombiner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorCombiner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorCombiner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorDescriptor.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorDescriptor.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorDescriptor.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorDescriptor.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJobBase.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJobBase.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJobBase.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJobBase.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueHistogram.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueHistogram.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueHistogram.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueHistogram.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapContextImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapContextImpl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapContextImpl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapContextImpl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/ChainReduceContextImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReduceContextImpl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/ChainReduceContextImpl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReduceContextImpl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/BooleanSplitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BooleanSplitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/BooleanSplitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BooleanSplitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBSplitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBSplitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBSplitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBSplitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DateSplitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DateSplitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/DateSplitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DateSplitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/FloatSplitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/FloatSplitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/FloatSplitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/FloatSplitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/IntegerSplitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/IntegerSplitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/IntegerSplitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/IntegerSplitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/MySQLDBRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/MySQLDBRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/MySQLDBRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/MySQLDBRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/MySQLDataDrivenDBRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/MySQLDataDrivenDBRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/MySQLDataDrivenDBRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/MySQLDataDrivenDBRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/OracleDBRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDBRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/OracleDBRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDBRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/OracleDateSplitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDateSplitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/OracleDateSplitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDateSplitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/TextSplitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/TextSplitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/TextSplitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/TextSplitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionHelper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionHelper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionHelper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionHelper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/DelegatingInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/DelegatingInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/DelegatingInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/DelegatingInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/DelegatingMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/DelegatingMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/DelegatingMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/DelegatingMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/DelegatingRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/DelegatingRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/DelegatingRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/DelegatingRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/KeyValueLineRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/KeyValueLineRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/KeyValueLineRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/KeyValueLineRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/KeyValueTextInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/KeyValueTextInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/KeyValueTextInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/KeyValueTextInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/MultipleInputs.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/MultipleInputs.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/MultipleInputs.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/MultipleInputs.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/NLineInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/NLineInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/NLineInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/NLineInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsBinaryInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsBinaryInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsBinaryInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsBinaryInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFilter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFilter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFilter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFilter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/TaggedInputSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TaggedInputSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/TaggedInputSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TaggedInputSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/ArrayListBackedIterator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ArrayListBackedIterator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/ArrayListBackedIterator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ArrayListBackedIterator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/ComposableInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ComposableInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/ComposableInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ComposableInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/ComposableRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ComposableRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/ComposableRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ComposableRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/InnerJoinRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/InnerJoinRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/InnerJoinRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/InnerJoinRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/JoinRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/JoinRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/JoinRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/JoinRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/MultiFilterRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/MultiFilterRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/MultiFilterRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/MultiFilterRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/OuterJoinRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OuterJoinRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/OuterJoinRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OuterJoinRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/Parser.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/Parser.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/ResetableIterator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ResetableIterator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/ResetableIterator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ResetableIterator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/StreamBackedIterator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/StreamBackedIterator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/StreamBackedIterator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/StreamBackedIterator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/InverseMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/InverseMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/InverseMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/InverseMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/RegexMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/RegexMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/RegexMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/RegexMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/TokenCounterMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/TokenCounterMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/TokenCounterMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/TokenCounterMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/WrappedMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/WrappedMapper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/map/WrappedMapper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/WrappedMapper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FilterOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FilterOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FilterOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FilterOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileAsBinaryOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileAsBinaryOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileAsBinaryOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileAsBinaryOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/HashPartitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/HashPartitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/HashPartitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/HashPartitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldHelper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldHelper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldHelper.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldHelper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/reduce/IntSumReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/reduce/IntSumReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/reduce/IntSumReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/reduce/IntSumReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/reduce/LongSumReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/reduce/LongSumReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/reduce/LongSumReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/reduce/LongSumReducer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/reduce/WrappedReducer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/reduce/WrappedReducer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/reduce/WrappedReducer.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/reduce/WrappedReducer.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
new file mode 100644
index 0000000..72a194a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
@@ -0,0 +1,354 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
+import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.QueueAclsInfo;
+import org.apache.hadoop.mapreduce.QueueInfo;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskCompletionEvent;
+import org.apache.hadoop.mapreduce.TaskReport;
+import org.apache.hadoop.mapreduce.TaskTrackerInfo;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenInfo;
+
+/**
+ * Protocol that a JobClient and the central JobTracker use to communicate. The
+ * JobClient can use these methods to submit a Job for execution, and learn about
+ * the current system status.
+ */
+@KerberosInfo(
+ serverPrincipal = JTConfig.JT_USER_NAME)
+@TokenInfo(DelegationTokenSelector.class)
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public interface ClientProtocol extends VersionedProtocol {
+ /*
+ * Changing the versionID to 2L since the getTaskCompletionEvents method has
+ * changed.
+ * Changed to 4 since killTask(String,boolean) is added
+ *Version 4: added jobtracker state to ClusterStatus
+ *Version 5: max_tasks in ClusterStatus is replaced by
+ * max_map_tasks and max_reduce_tasks for HADOOP-1274
+ * Version 6: change the counters representation for HADOOP-2248
+ * Version 7: added getAllJobs for HADOOP-2487
+ * Version 8: change {job|task}id's to use corresponding objects rather than strings.
+ * Version 9: change the counter representation for HADOOP-1915
+ * Version 10: added getSystemDir for HADOOP-3135
+ * Version 11: changed JobProfile to include the queue name for HADOOP-3698
+ * Version 12: Added getCleanupTaskReports and
+ * cleanupProgress to JobStatus as part of HADOOP-3150
+ * Version 13: Added getJobQueueInfos and getJobQueueInfo(queue name)
+ * and getAllJobs(queue) as a part of HADOOP-3930
+ * Version 14: Added setPriority for HADOOP-4124
+ * Version 15: Added KILLED status to JobStatus as part of HADOOP-3924
+ * Version 16: Added getSetupTaskReports and
+ * setupProgress to JobStatus as part of HADOOP-4261
+ * Version 17: getClusterStatus returns the amount of memory used by
+ * the server. HADOOP-4435
+ * Version 18: Added blacklisted trackers to the ClusterStatus
+ * for HADOOP-4305
+ * Version 19: Modified TaskReport to have TIP status and modified the
+ * method getClusterStatus() to take a boolean argument
+ * for HADOOP-4807
+ * Version 20: Modified ClusterStatus to have the tasktracker expiry
+ * interval for HADOOP-4939
+ * Version 21: Modified TaskID to be aware of the new TaskTypes
+ * Version 22: Added method getQueueAclsForCurrentUser to get queue acls info
+ * for a user
+ * Version 23: Modified the JobQueueInfo class to include queue state.
+ * Part of HADOOP-5913.
+ * Version 24: Modified ClusterStatus to include BlackListInfo class which
+ * encapsulates reasons and report for blacklisted node.
+ * Version 25: Added fields to JobStatus for HADOOP-817.
+ * Version 26: Added properties to JobQueueInfo as part of MAPREDUCE-861.
+ * added new APIs getRootQueues and
+ * getChildQueues(String queueName)
+ * Version 27: Changed protocol to use new api objects. And the protocol is
+ * renamed from JobSubmissionProtocol to ClientProtocol.
+ * Version 28: Added getJobHistoryDir() as part of MAPREDUCE-975.
+ * Version 29: Added reservedSlots, runningTasks and totalJobSubmissions
+ * to ClusterMetrics as part of MAPREDUCE-1048.
+ * Version 30: Job submission files are uploaded to a staging area under
+ * user home dir. JobTracker reads the required files from the
+ * staging area using user credentials passed via the rpc.
+ * Version 31: Added TokenStorage to submitJob
+ * Version 32: Added delegation tokens (add, renew, cancel)
+ * Version 33: Added JobACLs to JobStatus as part of MAPREDUCE-1307
+ * Version 34: Modified submitJob to use Credentials instead of TokenStorage.
+ * Version 35: Added the method getQueueAdmins(queueName) as part of
+ * MAPREDUCE-1664.
+ * Version 36: Added the method getJobTrackerStatus() as part of
+ * MAPREDUCE-2337.
+ * Version 37: More efficient serialization format for framework counters
+ * (MAPREDUCE-901)
+ */
+ public static final long versionID = 37L;
+
+ /**
+ * Allocate an ID for the job.
+ * @return a unique job ID to use when submitting the job.
+ * @throws IOException
+ */
+ public JobID getNewJobID() throws IOException, InterruptedException;
+
+ /**
+ * Submit a Job for execution. Returns the latest profile for
+ * that job.
+ */
+ public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
+ throws IOException, InterruptedException;
+
+ /**
+ * Get the current status of the cluster
+ *
+ * @return summary of the state of the cluster
+ */
+ public ClusterMetrics getClusterMetrics()
+ throws IOException, InterruptedException;
+
+ /**
+ * Get the JobTracker's status.
+ *
+ * @return {@link JobTrackerStatus} of the JobTracker
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public JobTrackerStatus getJobTrackerStatus() throws IOException,
+ InterruptedException;
+
+ public long getTaskTrackerExpiryInterval() throws IOException,
+ InterruptedException;
+
+ /**
+ * Get the administrators of the given job-queue.
+ * This method is for hadoop internal use only.
+ * @param queueName
+ * @return Queue administrators ACL for the queue to which the job is
+ * submitted
+ * @throws IOException
+ */
+ public AccessControlList getQueueAdmins(String queueName) throws IOException;
+
+ /**
+ * Kill the indicated job
+ */
+ public void killJob(JobID jobid) throws IOException, InterruptedException;
+
+ /**
+ * Set the priority of the specified job
+ * @param jobid ID of the job
+ * @param priority Priority to be set for the job
+ */
+ public void setJobPriority(JobID jobid, String priority)
+ throws IOException, InterruptedException;
+
+ /**
+ * Kill indicated task attempt.
+ * @param taskId the id of the task to kill.
+ * @param shouldFail if true the task is failed and added to the failed tasks
+ * list; otherwise it is just killed, without affecting the job's failure status.
+ */
+ public boolean killTask(TaskAttemptID taskId, boolean shouldFail)
+ throws IOException, InterruptedException;
+
+ /**
+ * Grab a handle to a job that is already known to the JobTracker.
+ * @return Status of the job, or null if not found.
+ */
+ public JobStatus getJobStatus(JobID jobid)
+ throws IOException, InterruptedException;
+
+ /**
+ * Grab the current job counters
+ */
+ public Counters getJobCounters(JobID jobid)
+ throws IOException, InterruptedException;
+
+ /**
+ * Grab a bunch of info on the tasks that make up the job
+ */
+ public TaskReport[] getTaskReports(JobID jobid, TaskType type)
+ throws IOException, InterruptedException;
+
+ /**
+ * A MapReduce system always operates on a single filesystem. This
+ * function returns the fs name. ('local' if the localfs; 'addr:port'
+ * if dfs). The client can then copy files into the right locations
+ * prior to submitting the job.
+ */
+ public String getFilesystemName() throws IOException, InterruptedException;
+
+ /**
+ * Get all the jobs submitted.
+ * @return array of JobStatus for the submitted jobs
+ */
+ public JobStatus[] getAllJobs() throws IOException, InterruptedException;
+
+ /**
+ * Get task completion events for the jobid, starting from fromEventId.
+ * Returns empty array if no events are available.
+ * @param jobid job id
+ * @param fromEventId event id to start from.
+ * @param maxEvents the max number of events we want to look at
+ * @return array of task completion events.
+ * @throws IOException
+ */
+ public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid,
+ int fromEventId, int maxEvents) throws IOException, InterruptedException;
+
+ /**
+ * Get the diagnostics for a given task in a given job
+ * @param taskId the id of the task
+ * @return an array of the diagnostic messages
+ */
+ public String[] getTaskDiagnostics(TaskAttemptID taskId)
+ throws IOException, InterruptedException;
+
+ /**
+ * Get all active trackers in cluster.
+ * @return array of TaskTrackerInfo
+ */
+ public TaskTrackerInfo[] getActiveTrackers()
+ throws IOException, InterruptedException;
+
+ /**
+ * Get all blacklisted trackers in cluster.
+ * @return array of TaskTrackerInfo
+ */
+ public TaskTrackerInfo[] getBlacklistedTrackers()
+ throws IOException, InterruptedException;
+
+ /**
+ * Grab the jobtracker system directory path
+ * where job-specific files are to be placed.
+ *
+ * @return the system directory where job-specific files are to be placed.
+ */
+ public String getSystemDir() throws IOException, InterruptedException;
+
+ /**
+ * Get a hint from the JobTracker
+ * where job-specific files are to be placed.
+ *
+ * @return the directory where job-specific files are to be placed.
+ */
+ public String getStagingAreaDir() throws IOException, InterruptedException;
+
+ /**
+ * Gets the directory location of the completed job history files.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public String getJobHistoryDir()
+ throws IOException, InterruptedException;
+
+ /**
+ * Gets the set of Queues associated with the Job Tracker
+ *
+ * @return Array of the Queue Information Object
+ * @throws IOException
+ */
+ public QueueInfo[] getQueues() throws IOException, InterruptedException;
+
+ /**
+ * Gets scheduling information associated with a particular job queue
+ *
+ * @param queueName Queue Name
+ * @return Scheduling Information of the Queue
+ * @throws IOException
+ */
+ public QueueInfo getQueue(String queueName)
+ throws IOException, InterruptedException;
+
+ /**
+ * Gets the Queue ACLs for the current user
+ * @return array of QueueAclsInfo objects for the current user.
+ * @throws IOException
+ */
+ public QueueAclsInfo[] getQueueAclsForCurrentUser()
+ throws IOException, InterruptedException;
+
+ /**
+ * Gets the root level queues.
+ * @return array of QueueInfo objects.
+ * @throws IOException
+ */
+ public QueueInfo[] getRootQueues() throws IOException, InterruptedException;
+
+ /**
+ * Returns immediate children of queueName.
+ * @param queueName
+ * @return array of QueueInfo objects which are children of queueName
+ * @throws IOException
+ */
+ public QueueInfo[] getChildQueues(String queueName)
+ throws IOException, InterruptedException;
+
+ /**
+ * Get a new delegation token.
+ * @param renewer the user other than the creator (if any) that can renew the
+ * token
+ * @return the new delegation token
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public
+ Token<DelegationTokenIdentifier> getDelegationToken(Text renewer
+ ) throws IOException,
+ InterruptedException;
+
+ /**
+ * Renew an existing delegation token
+ * @param token the token to renew
+ * @return the new expiration time
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public long renewDelegationToken(Token<DelegationTokenIdentifier> token
+ ) throws IOException,
+ InterruptedException;
+
+ /**
+ * Cancel a delegation token.
+ * @param token the token to cancel
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
+ ) throws IOException,
+ InterruptedException;
+}
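
The interface above is a private, versioned RPC contract; application code is expected to reach it indirectly through the public Cluster/Job API, which picks an implementation via ClientProtocolProvider (renamed below). A minimal, illustrative sketch of that indirection, assuming the mapreduce client API on this branch (the sketch itself is not part of this patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Cluster;
  import org.apache.hadoop.mapreduce.ClusterMetrics;

  public class ClusterStatusSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Cluster resolves and wraps a ClientProtocol instance internally.
      Cluster cluster = new Cluster(conf);
      // getClusterStatus() maps onto ClientProtocol.getClusterMetrics().
      ClusterMetrics metrics = cluster.getClusterStatus();
      System.out.println("trackers=" + metrics.getTaskTrackerCount()
          + ", occupied map slots=" + metrics.getOccupiedMapSlots());
      cluster.close();
    }
  }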
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocolProvider.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocolProvider.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocolProvider.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocolProvider.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/protocol/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/protocol/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
new file mode 100644
index 0000000..a7d9192
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
@@ -0,0 +1,219 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.security;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Master;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+
+
+/**
+ * This class provides user facing APIs for transferring secrets from
+ * the job client to the tasks.
+ * The secrets can be stored just before submission of jobs and read during
+ * the task execution.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class TokenCache {
+
+ private static final Log LOG = LogFactory.getLog(TokenCache.class);
+
+
+ /**
+ * Auxiliary method to get one of the user's secret keys.
+ * @param alias the alias under which the key is stored
+ * @return the secret key from the storage, or null if none
+ */
+ public static byte[] getSecretKey(Credentials credentials, Text alias) {
+ if(credentials == null)
+ return null;
+ return credentials.getSecretKey(alias);
+ }
+
+ /**
+ * Convenience method to obtain delegation tokens from namenodes
+ * corresponding to the paths passed.
+ * @param credentials
+ * @param ps array of paths
+ * @param conf configuration
+ * @throws IOException
+ */
+ public static void obtainTokensForNamenodes(Credentials credentials,
+ Path[] ps, Configuration conf) throws IOException {
+ if (!UserGroupInformation.isSecurityEnabled()) {
+ return;
+ }
+ obtainTokensForNamenodesInternal(credentials, ps, conf);
+ }
+
+ static void obtainTokensForNamenodesInternal(Credentials credentials,
+ Path[] ps, Configuration conf) throws IOException {
+ for(Path p: ps) {
+ FileSystem fs = FileSystem.get(p.toUri(), conf);
+ obtainTokensForNamenodesInternal(fs, credentials, conf);
+ }
+ }
+
+ /**
+ * Get a delegation token for a specific FileSystem and add it to the
+ * given credentials.
+ * @param fs the FileSystem to get the token from
+ * @param credentials the credentials to add the token to
+ * @param conf the configuration
+ * @throws IOException
+ */
+ static void obtainTokensForNamenodesInternal(FileSystem fs,
+ Credentials credentials, Configuration conf) throws IOException {
+ String delegTokenRenewer = Master.getMasterPrincipal(conf);
+ if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
+ throw new IOException(
+ "Can't get JobTracker Kerberos principal for use as renewer");
+ }
+ boolean readFile = true;
+
+ String fsName = fs.getCanonicalServiceName();
+ if (TokenCache.getDelegationToken(credentials, fsName) == null) {
+ //TODO: Need to come up with a better place to put
+ //this block of code that reads the binary credentials file
+ if (readFile) {
+ readFile = false;
+ String binaryTokenFilename =
+ conf.get("mapreduce.job.credentials.binary");
+ if (binaryTokenFilename != null) {
+ Credentials binary;
+ try {
+ binary = Credentials.readTokenStorageFile(
+ new Path("file:///" + binaryTokenFilename), conf);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ credentials.addAll(binary);
+ }
+ if (TokenCache.getDelegationToken(credentials, fsName) != null) {
+ LOG.debug("DT for " + fsName + " is already present");
+ return;
+ }
+ }
+ List<Token<?>> tokens = fs.getDelegationTokens(delegTokenRenewer);
+ if (tokens != null) {
+ for (Token<?> token : tokens) {
+ credentials.addToken(token.getService(), token);
+ LOG.info("Got dt for " + fs.getUri() + ";uri="+ fsName +
+ ";t.service="+token.getService());
+ }
+ }
+ //Call getDelegationToken as well for now - for FS implementations
+ // which may not have implemented getDelegationTokens (hftp)
+ Token<?> token = fs.getDelegationToken(delegTokenRenewer);
+ if (token != null) {
+ Text fsNameText = new Text(fsName);
+ token.setService(fsNameText);
+ credentials.addToken(fsNameText, token);
+ LOG.info("Got dt for " + fs.getUri() + ";uri="+ fsName +
+ ";t.service="+token.getService());
+ }
+ }
+ }
+
+ /**
+ * file name used on HDFS for generated job token
+ */
+ @InterfaceAudience.Private
+ public static final String JOB_TOKEN_HDFS_FILE = "jobToken";
+
+ /**
+ * conf setting for job tokens cache file name
+ */
+ @InterfaceAudience.Private
+ public static final String JOB_TOKENS_FILENAME = "mapreduce.job.jobTokenFile";
+ private static final Text JOB_TOKEN = new Text("ShuffleAndJobToken");
+
+ /**
+ * Get the delegation token stored for the given namenode.
+ * @param namenode the namenode service name
+ * @return the delegation token, or null if none is stored
+ */
+ @SuppressWarnings("unchecked")
+ @InterfaceAudience.Private
+ public static Token<DelegationTokenIdentifier> getDelegationToken(
+ Credentials credentials, String namenode) {
+ return (Token<DelegationTokenIdentifier>) credentials.getToken(new Text(
+ namenode));
+ }
+
+ /**
+ * Load the job token from the given file.
+ * @param jobTokenFile path to the token file on the local filesystem
+ * @throws IOException
+ */
+ @InterfaceAudience.Private
+ public static Credentials loadTokens(String jobTokenFile, JobConf conf)
+ throws IOException {
+ Path localJobTokenFile = new Path ("file:///" + jobTokenFile);
+
+ Credentials ts = Credentials.readTokenStorageFile(localJobTokenFile, conf);
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Task: Loaded jobTokenFile from: "+
+ localJobTokenFile.toUri().getPath()
+ +"; num of sec keys = " + ts.numberOfSecretKeys() +
+ " Number of tokens " + ts.numberOfTokens());
+ }
+ return ts;
+ }
+ /**
+ * Store the given job token in the credentials.
+ * @param t the job token to store
+ */
+ @InterfaceAudience.Private
+ public static void setJobToken(Token<? extends TokenIdentifier> t,
+ Credentials credentials) {
+ credentials.addToken(JOB_TOKEN, t);
+ }
+ /**
+ * Get the job token from the credentials.
+ * @return the job token
+ */
+ @SuppressWarnings("unchecked")
+ @InterfaceAudience.Private
+ public static Token<JobTokenIdentifier> getJobToken(Credentials credentials) {
+ return (Token<JobTokenIdentifier>) credentials.getToken(JOB_TOKEN);
+ }
+}
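
The primary client-side entry point above is obtainTokensForNamenodes, which the job submission path invokes before job files are written to the staging area. A hedged usage sketch, assuming Kerberos security is enabled; the job name and input path are hypothetical, and the sketch is not part of this patch:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.security.TokenCache;

  public class TokenPrefetchSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Job job = new Job(conf, "token-prefetch-sketch");  // hypothetical job name
      // Hypothetical input path; a token is fetched once per distinct filesystem.
      Path[] inputs = { new Path("/user/alice/input") };
      // No-op when security is off; otherwise the delegation tokens land in the
      // job's Credentials and travel with the job for use by its tasks.
      TokenCache.obtainTokensForNamenodes(job.getCredentials(), inputs, conf);
    }
  }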
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/JobTokenSecretManager.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenSecretManager.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/JobTokenSecretManager.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenSecretManager.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/JobTokenSelector.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenSelector.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/JobTokenSelector.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenSelector.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSelector.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSelector.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSelector.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSelector.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/delegation/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/delegation/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/security/token/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/split/JobSplit.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplit.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/split/JobSplit.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplit.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/split/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/split/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/MapContextImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/MapContextImpl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/MapContextImpl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/MapContextImpl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/TaskAttemptContextImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskAttemptContextImpl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/TaskAttemptContextImpl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskAttemptContextImpl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/TaskInputOutputContextImpl.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskInputOutputContextImpl.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/TaskInputOutputContextImpl.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskInputOutputContextImpl.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/package-info.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java
new file mode 100644
index 0000000..8f0dad7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.task.reduce;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
+import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+
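+/**
+ * Background daemon thread that periodically polls the umbilical protocol
+ * for map-completion events and feeds newly completed map outputs to the
+ * {@link ShuffleScheduler}.
+ */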
+class EventFetcher<K,V> extends Thread {
+ private static final long SLEEP_TIME = 1000;
+ private static final int MAX_EVENTS_TO_FETCH = 10000;
+ private static final int MAX_RETRIES = 10;
+ private static final int RETRY_PERIOD = 5000;
+ private static final Log LOG = LogFactory.getLog(EventFetcher.class);
+
+ private final TaskAttemptID reduce;
+ private final TaskUmbilicalProtocol umbilical;
+ private final ShuffleScheduler<K,V> scheduler;
+ private int fromEventId = 0;
+ private final ExceptionReporter exceptionReporter;
+
+ private int maxMapRuntime = 0;
+
+ public EventFetcher(TaskAttemptID reduce,
+ TaskUmbilicalProtocol umbilical,
+ ShuffleScheduler<K,V> scheduler,
+ ExceptionReporter reporter) {
+ setName("EventFetcher for fetching Map Completion Events");
+ setDaemon(true);
+ this.reduce = reduce;
+ this.umbilical = umbilical;
+ this.scheduler = scheduler;
+ this.exceptionReporter = reporter;
+ }
+
+ @Override
+ public void run() {
+ int failures = 0;
+ LOG.info(reduce + " Thread started: " + getName());
+
+ try {
+ while (true) {
+ try {
+ int numNewMaps = getMapCompletionEvents();
+ failures = 0;
+ if (numNewMaps > 0) {
+ LOG.info(reduce + ": " + "Got " + numNewMaps + " new map-outputs");
+ }
+ LOG.debug("GetMapEventsThread about to sleep for " + SLEEP_TIME);
+ Thread.sleep(SLEEP_TIME);
+ } catch (IOException ie) {
+ LOG.info("Exception in getting events", ie);
+ // check to see whether to abort
+ if (++failures >= MAX_RETRIES) {
+ throw new IOException("too many failures downloading events", ie);
+ }
+ // sleep for a bit
+ Thread.sleep(RETRY_PERIOD);
+ }
+ }
+ } catch (InterruptedException e) {
+ return;
+ } catch (Throwable t) {
+ exceptionReporter.reportException(t);
+ return;
+ }
+ }
+
+ /**
+ * Queries the {@link TaskTracker} for a set of map-completion events
+ * starting from the last seen event ID.
+ * @return the number of newly completed maps
+ * @throws IOException
+ */
+ private int getMapCompletionEvents() throws IOException {
+
+ int numNewMaps = 0;
+
+ MapTaskCompletionEventsUpdate update =
+ umbilical.getMapCompletionEvents((org.apache.hadoop.mapred.JobID)
+ reduce.getJobID(),
+ fromEventId,
+ MAX_EVENTS_TO_FETCH,
+ (org.apache.hadoop.mapred.TaskAttemptID)
+ reduce);
+ TaskCompletionEvent[] events = update.getMapTaskCompletionEvents();
+ LOG.debug("Got " + events.length + " map completion events from " +
+ fromEventId);
+
+ // Check if the reset is required.
+ // Since there is no ordering of the task completion events at the
+ // reducer, the only option to sync with the new jobtracker is to reset
+ // the events index
+ if (update.shouldReset()) {
+ fromEventId = 0;
+ scheduler.resetKnownMaps();
+ }
+
+ // Update the last seen event ID
+ fromEventId += events.length;
+
+ // Process the TaskCompletionEvents:
+ // 1. Save the SUCCEEDED maps in knownOutputs to fetch the outputs.
+ // 2. Save the OBSOLETE/FAILED/KILLED maps in obsoleteOutputs to stop
+ // fetching from those maps.
+ // 3. Remove TIPFAILED maps from neededOutputs since we don't need their
+ // outputs at all.
+ for (TaskCompletionEvent event : events) {
+ switch (event.getTaskStatus()) {
+ case SUCCEEDED:
+ URI u = getBaseURI(event.getTaskTrackerHttp());
+ scheduler.addKnownMapOutput(u.getHost() + ":" + u.getPort(),
+ u.toString(),
+ event.getTaskAttemptId());
+ numNewMaps++;
+ int duration = event.getTaskRunTime();
+ if (duration > maxMapRuntime) {
+ maxMapRuntime = duration;
+ scheduler.informMaxMapRunTime(maxMapRuntime);
+ }
+ break;
+ case FAILED:
+ case KILLED:
+ case OBSOLETE:
+ scheduler.obsoleteMapOutput(event.getTaskAttemptId());
+ LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
+ " map-task: '" + event.getTaskAttemptId() + "'");
+ break;
+ case TIPFAILED:
+ scheduler.tipFailed(event.getTaskAttemptId().getTaskID());
+ LOG.info("Ignoring output of failed map TIP: '" +
+ event.getTaskAttemptId() + "'");
+ break;
+ }
+ }
+ return numNewMaps;
+ }
+
+ private URI getBaseURI(String url) {
+ StringBuilder baseUrl = new StringBuilder(url);
+ if (!url.endsWith("/")) {
+ baseUrl.append("/");
+ }
+ baseUrl.append("mapOutput?job=");
+ baseUrl.append(reduce.getJobID());
+ baseUrl.append("&reduce=");
+ baseUrl.append(reduce.getTaskID().getId());
+ baseUrl.append("&map=");
+ return URI.create(baseUrl.toString());
+ }
+}
\ No newline at end of file
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/ExceptionReporter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ExceptionReporter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/ExceptionReporter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ExceptionReporter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryWriter.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryWriter.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryWriter.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryWriter.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java
new file mode 100644
index 0000000..aab0ccc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.task.reduce;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Comparator;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BoundedByteArrayOutputStream;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapOutputFile;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+
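+/**
+ * Represents one fetched map output. Depending on its {@link Type}, the
+ * output is backed by an in-memory buffer (MEMORY), a local temporary file
+ * (DISK), or nothing at all (WAIT, a sentinel used to stall fetchers while
+ * the shuffle is over its memory limit).
+ */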
+class MapOutput<K,V> {
+ private static final Log LOG = LogFactory.getLog(MapOutput.class);
+ private static final AtomicInteger ID = new AtomicInteger(0);
+
+ public enum Type {
+ WAIT,
+ MEMORY,
+ DISK
+ }
+
+ private final int id;
+
+ private final MergeManager<K,V> merger;
+ private final TaskAttemptID mapId;
+
+ private final long size;
+
+ private final byte[] memory;
+ private BoundedByteArrayOutputStream byteStream;
+
+ private final FileSystem localFS;
+ private final Path tmpOutputPath;
+ private final Path outputPath;
+ private final OutputStream disk;
+
+ private final Type type;
+
+ private final boolean primaryMapOutput;
+
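+ // Disk-backed output: data is fetched into a fetcher-specific temporary
+ // file and renamed into place on commit().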
+ MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, long size,
+ JobConf conf, LocalDirAllocator localDirAllocator,
+ int fetcher, boolean primaryMapOutput, MapOutputFile mapOutputFile)
+ throws IOException {
+ this.id = ID.incrementAndGet();
+ this.mapId = mapId;
+ this.merger = merger;
+
+ type = Type.DISK;
+
+ memory = null;
+ byteStream = null;
+
+ this.size = size;
+
+ this.localFS = FileSystem.getLocal(conf);
+ outputPath =
+ mapOutputFile.getInputFileForWrite(mapId.getTaskID(),size);
+ tmpOutputPath = outputPath.suffix(String.valueOf(fetcher));
+
+ disk = localFS.create(tmpOutputPath);
+
+ this.primaryMapOutput = primaryMapOutput;
+ }
+
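+ // In-memory output: data is fetched into a bounded byte array reserved
+ // from the shuffle memory budget.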
+ MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, int size,
+ boolean primaryMapOutput) {
+ this.id = ID.incrementAndGet();
+ this.mapId = mapId;
+ this.merger = merger;
+
+ type = Type.MEMORY;
+ byteStream = new BoundedByteArrayOutputStream(size);
+ memory = byteStream.getBuffer();
+
+ this.size = size;
+
+ localFS = null;
+ disk = null;
+ outputPath = null;
+ tmpOutputPath = null;
+
+ this.primaryMapOutput = primaryMapOutput;
+ }
+
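+ // Sentinel output used to signal a fetcher to stall and retry later.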
+ public MapOutput(TaskAttemptID mapId) {
+ this.id = ID.incrementAndGet();
+ this.mapId = mapId;
+
+ type = Type.WAIT;
+ merger = null;
+ memory = null;
+ byteStream = null;
+
+ size = -1;
+
+ localFS = null;
+ disk = null;
+ outputPath = null;
+ tmpOutputPath = null;
+
+ this.primaryMapOutput = false;
+ }
+
+ public boolean isPrimaryMapOutput() {
+ return primaryMapOutput;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof MapOutput) {
+ return id == ((MapOutput)obj).id;
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return id;
+ }
+
+ public Path getOutputPath() {
+ return outputPath;
+ }
+
+ public byte[] getMemory() {
+ return memory;
+ }
+
+ public BoundedByteArrayOutputStream getArrayStream() {
+ return byteStream;
+ }
+
+ public OutputStream getDisk() {
+ return disk;
+ }
+
+ public TaskAttemptID getMapId() {
+ return mapId;
+ }
+
+ public Type getType() {
+ return type;
+ }
+
+ public long getSize() {
+ return size;
+ }
+
+ public void commit() throws IOException {
+ if (type == Type.MEMORY) {
+ merger.closeInMemoryFile(this);
+ } else if (type == Type.DISK) {
+ localFS.rename(tmpOutputPath, outputPath);
+ merger.closeOnDiskFile(outputPath);
+ } else {
+ throw new IOException("Cannot commit MapOutput of type WAIT!");
+ }
+ }
+
+ public void abort() {
+ if (type == Type.MEMORY) {
+ merger.unreserve(memory.length);
+ } else if (type == Type.DISK) {
+ try {
+ localFS.delete(tmpOutputPath, false);
+ } catch (IOException ie) {
+ LOG.info("failure to clean up " + tmpOutputPath, ie);
+ }
+ } else {
+ throw new IllegalArgumentException(
+ "Cannot abort MapOutput of type WAIT!");
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "MapOutput(" + mapId + ", " + type + ")";
+ }
+
+ public static class MapOutputComparator<K, V>
+ implements Comparator<MapOutput<K, V>> {
+ public int compare(MapOutput<K, V> o1, MapOutput<K, V> o2) {
+ if (o1.id == o2.id) {
+ return 0;
+ }
+
+ if (o1.size < o2.size) {
+ return -1;
+ } else if (o1.size > o2.size) {
+ return 1;
+ }
+
+ if (o1.id < o2.id) {
+ return -1;
+ } else {
+ return 1;
+ }
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
new file mode 100644
index 0000000..edf332a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
@@ -0,0 +1,769 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.task.reduce;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ChecksumFileSystem;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.IFile;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapOutputFile;
+import org.apache.hadoop.mapred.Merger;
+import org.apache.hadoop.mapred.RawKeyValueIterator;
+import org.apache.hadoop.mapred.Reducer;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapred.Task;
+import org.apache.hadoop.mapred.Counters.Counter;
+import org.apache.hadoop.mapred.IFile.Reader;
+import org.apache.hadoop.mapred.IFile.Writer;
+import org.apache.hadoop.mapred.Merger.Segment;
+import org.apache.hadoop.mapred.Task.CombineOutputCollector;
+import org.apache.hadoop.mapred.Task.CombineValuesIterator;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator;
+import org.apache.hadoop.util.Progress;
+import org.apache.hadoop.util.ReflectionUtils;
+
+@SuppressWarnings(value={"unchecked", "deprecation"})
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class MergeManager<K, V> {
+
+ private static final Log LOG = LogFactory.getLog(MergeManager.class);
+
+ /* Maximum percentage of the in-memory limit that a single shuffle can
+ * consume */
+ private static final float MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION = 0.25f;
+
+ private final TaskAttemptID reduceId;
+
+ private final JobConf jobConf;
+ private final FileSystem localFS;
+ private final FileSystem rfs;
+ private final LocalDirAllocator localDirAllocator;
+
+ protected MapOutputFile mapOutputFile;
+
+ Set<MapOutput<K, V>> inMemoryMergedMapOutputs =
+ new TreeSet<MapOutput<K,V>>(new MapOutputComparator<K, V>());
+ private final IntermediateMemoryToMemoryMerger memToMemMerger;
+
+ Set<MapOutput<K, V>> inMemoryMapOutputs =
+ new TreeSet<MapOutput<K,V>>(new MapOutputComparator<K, V>());
+ private final InMemoryMerger inMemoryMerger;
+
+ Set<Path> onDiskMapOutputs = new TreeSet<Path>();
+ private final OnDiskMerger onDiskMerger;
+
+ private final long memoryLimit;
+ private long usedMemory;
+ private final long maxSingleShuffleLimit;
+
+ private final int memToMemMergeOutputsThreshold;
+ private final long mergeThreshold;
+
+ private final int ioSortFactor;
+
+ private final Reporter reporter;
+ private final ExceptionReporter exceptionReporter;
+
+ /**
+ * Combiner class to run during in-memory merge, if defined.
+ */
+ private final Class<? extends Reducer> combinerClass;
+
+ /**
+ * Resettable collector used for combine.
+ */
+ private final CombineOutputCollector<K,V> combineCollector;
+
+ private final Counters.Counter spilledRecordsCounter;
+
+ private final Counters.Counter reduceCombineInputCounter;
+
+ private final Counters.Counter mergedMapOutputsCounter;
+
+ private final CompressionCodec codec;
+
+ private final Progress mergePhase;
+
+ public MergeManager(TaskAttemptID reduceId, JobConf jobConf,
+ FileSystem localFS,
+ LocalDirAllocator localDirAllocator,
+ Reporter reporter,
+ CompressionCodec codec,
+ Class<? extends Reducer> combinerClass,
+ CombineOutputCollector<K,V> combineCollector,
+ Counters.Counter spilledRecordsCounter,
+ Counters.Counter reduceCombineInputCounter,
+ Counters.Counter mergedMapOutputsCounter,
+ ExceptionReporter exceptionReporter,
+ Progress mergePhase, MapOutputFile mapOutputFile) {
+ this.reduceId = reduceId;
+ this.jobConf = jobConf;
+ this.localDirAllocator = localDirAllocator;
+ this.exceptionReporter = exceptionReporter;
+
+ this.reporter = reporter;
+ this.codec = codec;
+ this.combinerClass = combinerClass;
+ this.combineCollector = combineCollector;
+ this.reduceCombineInputCounter = reduceCombineInputCounter;
+ this.spilledRecordsCounter = spilledRecordsCounter;
+ this.mergedMapOutputsCounter = mergedMapOutputsCounter;
+ this.mapOutputFile = mapOutputFile;
+ this.mapOutputFile.setConf(jobConf);
+
+ this.localFS = localFS;
+ this.rfs = ((LocalFileSystem)localFS).getRaw();
+
+ final float maxInMemCopyUse =
+ jobConf.getFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 0.90f);
+ if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) {
+ throw new IllegalArgumentException("Invalid value for " +
+ MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT + ": " +
+ maxInMemCopyUse);
+ }
+
+ // Allow unit tests to fix Runtime memory
+ this.memoryLimit =
+ (long)(jobConf.getLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,
+ Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE))
+ * maxInMemCopyUse);
+
+ this.ioSortFactor = jobConf.getInt(MRJobConfig.IO_SORT_FACTOR, 100);
+
+ this.maxSingleShuffleLimit =
+ (long)(memoryLimit * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION);
+ this.memToMemMergeOutputsThreshold =
+ jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor);
+ this.mergeThreshold = (long)(this.memoryLimit *
+ jobConf.getFloat(MRJobConfig.SHUFFLE_MERGE_EPRCENT,
+ 0.90f));
+ LOG.info("MergerManager: memoryLimit=" + memoryLimit + ", " +
+ "maxSingleShuffleLimit=" + maxSingleShuffleLimit + ", " +
+ "mergeThreshold=" + mergeThreshold + ", " +
+ "ioSortFactor=" + ioSortFactor + ", " +
+ "memToMemMergeOutputsThreshold=" + memToMemMergeOutputsThreshold);
+
+ boolean allowMemToMemMerge =
+ jobConf.getBoolean(MRJobConfig.REDUCE_MEMTOMEM_ENABLED, false);
+ if (allowMemToMemMerge) {
+ this.memToMemMerger =
+ new IntermediateMemoryToMemoryMerger(this,
+ memToMemMergeOutputsThreshold);
+ this.memToMemMerger.start();
+ } else {
+ this.memToMemMerger = null;
+ }
+
+ this.inMemoryMerger = new InMemoryMerger(this);
+ this.inMemoryMerger.start();
+
+ this.onDiskMerger = new OnDiskMerger(this);
+ this.onDiskMerger.start();
+
+ this.mergePhase = mergePhase;
+ }
+
+
+ TaskAttemptID getReduceId() {
+ return reduceId;
+ }
+
+ public void waitForInMemoryMerge() throws InterruptedException {
+ inMemoryMerger.waitForMerge();
+ }
+
+ private boolean canShuffleToMemory(long requestedSize) {
+ return (requestedSize < maxSingleShuffleLimit);
+ }
+
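+ // Sentinel returned to fetchers when the shuffle is over the memory
+ // limit; callers must retry the reservation later.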
+ private final MapOutput<K,V> stallShuffle = new MapOutput<K,V>(null);
+
+ public synchronized MapOutput<K,V> reserve(TaskAttemptID mapId,
+ long requestedSize,
+ int fetcher
+ ) throws IOException {
+ if (!canShuffleToMemory(requestedSize)) {
+ LOG.info(mapId + ": Shuffling to disk since " + requestedSize +
+ " is greater than maxSingleShuffleLimit (" +
+ maxSingleShuffleLimit + ")");
+ return new MapOutput<K,V>(mapId, this, requestedSize, jobConf,
+ localDirAllocator, fetcher, true,
+ mapOutputFile);
+ }
+
+ // Stall shuffle if we are above the memory limit
+
+ // It is possible that all threads could just be stalling and not make
+ // progress at all. This could happen when:
+ //
+ // requested size is causing the used memory to go above limit &&
+ // requested size < singleShuffleLimit &&
+ // current used size < mergeThreshold (merge will not get triggered)
+ //
+ // To avoid this from happening, we allow exactly one thread to go past
+ // the memory limit. We check (usedMemory > memoryLimit) and not
+ // (usedMemory + requestedSize > memoryLimit). When this thread is done
+ // fetching, this will automatically trigger a merge thereby unlocking
+ // all the stalled threads
+
+ if (usedMemory > memoryLimit) {
+ LOG.debug(mapId + ": Stalling shuffle since usedMemory (" + usedMemory +
+ ") is greater than memoryLimit (" + memoryLimit + ")");
+
+ return stallShuffle;
+ }
+
+ // Allow the in-memory shuffle to progress
+ LOG.debug(mapId + ": Proceeding with shuffle since usedMemory (" +
+ usedMemory +
+ ") is lesser than memoryLimit (" + memoryLimit + ")");
+ return unconditionalReserve(mapId, requestedSize, true);
+ }
+
+ /**
+ * Unconditional reserve, used by the memory-to-memory merge thread.
+ * @return an in-memory MapOutput backed by the reserved buffer
+ */
+ private synchronized MapOutput<K, V> unconditionalReserve(
+ TaskAttemptID mapId, long requestedSize, boolean primaryMapOutput) {
+ usedMemory += requestedSize;
+ return new MapOutput<K,V>(mapId, this, (int)requestedSize,
+ primaryMapOutput);
+ }
+
+ synchronized void unreserve(long size) {
+ usedMemory -= size;
+ }
+
+ public synchronized void closeInMemoryFile(MapOutput<K,V> mapOutput) {
+ inMemoryMapOutputs.add(mapOutput);
+ LOG.info("closeInMemoryFile -> map-output of size: " + mapOutput.getSize()
+ + ", inMemoryMapOutputs.size() -> " + inMemoryMapOutputs.size());
+
+ synchronized (inMemoryMerger) {
+ if (!inMemoryMerger.isInProgress() && usedMemory >= mergeThreshold) {
+ LOG.info("Starting inMemoryMerger's merge since usedMemory=" +
+ usedMemory + " > mergeThreshold=" + mergeThreshold);
+ inMemoryMapOutputs.addAll(inMemoryMergedMapOutputs);
+ inMemoryMergedMapOutputs.clear();
+ inMemoryMerger.startMerge(inMemoryMapOutputs);
+ }
+ }
+
+ if (memToMemMerger != null) {
+ synchronized (memToMemMerger) {
+ if (!memToMemMerger.isInProgress() &&
+ inMemoryMapOutputs.size() >= memToMemMergeOutputsThreshold) {
+ memToMemMerger.startMerge(inMemoryMapOutputs);
+ }
+ }
+ }
+ }
+
+
+ public synchronized void closeInMemoryMergedFile(MapOutput<K,V> mapOutput) {
+ inMemoryMergedMapOutputs.add(mapOutput);
+ LOG.info("closeInMemoryMergedFile -> size: " + mapOutput.getSize() +
+ ", inMemoryMergedMapOutputs.size() -> " +
+ inMemoryMergedMapOutputs.size());
+ }
+
+ public synchronized void closeOnDiskFile(Path file) {
+ onDiskMapOutputs.add(file);
+
+ synchronized (onDiskMerger) {
+ if (!onDiskMerger.isInProgress() &&
+ onDiskMapOutputs.size() >= (2 * ioSortFactor - 1)) {
+ onDiskMerger.startMerge(onDiskMapOutputs);
+ }
+ }
+ }
+
+ public RawKeyValueIterator close() throws Throwable {
+ // Wait for on-going merges to complete
+ if (memToMemMerger != null) {
+ memToMemMerger.close();
+ }
+ inMemoryMerger.close();
+ onDiskMerger.close();
+
+ List<MapOutput<K, V>> memory =
+ new ArrayList<MapOutput<K, V>>(inMemoryMergedMapOutputs);
+ memory.addAll(inMemoryMapOutputs);
+ List<Path> disk = new ArrayList<Path>(onDiskMapOutputs);
+ return finalMerge(jobConf, rfs, memory, disk);
+ }
+
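+ /**
+ * Merges several in-memory map outputs into a single, larger in-memory
+ * output to reduce the number of segments in the final merge.
+ */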
+ private class IntermediateMemoryToMemoryMerger
+ extends MergeThread<MapOutput<K, V>, K, V> {
+
+ public IntermediateMemoryToMemoryMerger(MergeManager<K, V> manager,
+ int mergeFactor) {
+ super(manager, mergeFactor, exceptionReporter);
+ setName("InMemoryMerger - Thread to do in-memory merge of in-memory " +
+ "shuffled map-outputs");
+ setDaemon(true);
+ }
+
+ @Override
+ public void merge(List<MapOutput<K, V>> inputs) throws IOException {
+ if (inputs == null || inputs.size() == 0) {
+ return;
+ }
+
+ TaskAttemptID dummyMapId = inputs.get(0).getMapId();
+ List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K, V>>();
+ long mergeOutputSize =
+ createInMemorySegments(inputs, inMemorySegments, 0);
+ int noInMemorySegments = inMemorySegments.size();
+
+ MapOutput<K, V> mergedMapOutputs =
+ unconditionalReserve(dummyMapId, mergeOutputSize, false);
+
+ Writer<K, V> writer =
+ new InMemoryWriter<K, V>(mergedMapOutputs.getArrayStream());
+
+ LOG.info("Initiating Memory-to-Memory merge with " + noInMemorySegments +
+ " segments of total-size: " + mergeOutputSize);
+
+ RawKeyValueIterator rIter =
+ Merger.merge(jobConf, rfs,
+ (Class<K>)jobConf.getMapOutputKeyClass(),
+ (Class<V>)jobConf.getMapOutputValueClass(),
+ inMemorySegments, inMemorySegments.size(),
+ new Path(reduceId.toString()),
+ (RawComparator<K>)jobConf.getOutputKeyComparator(),
+ reporter, null, null, null);
+ Merger.writeFile(rIter, writer, reporter, jobConf);
+ writer.close();
+
+ LOG.info(reduceId +
+ " Memory-to-Memory merge of the " + noInMemorySegments +
+ " files in-memory complete.");
+
+ // Note the output of the merge
+ closeInMemoryMergedFile(mergedMapOutputs);
+ }
+ }
+
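+ /**
+ * Merges in-memory map outputs to a single on-disk file once usedMemory
+ * crosses mergeThreshold, running the combiner on the way out if one is
+ * configured.
+ */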
+ private class InMemoryMerger extends MergeThread<MapOutput<K,V>, K,V> {
+
+ public InMemoryMerger(MergeManager<K, V> manager) {
+ super(manager, Integer.MAX_VALUE, exceptionReporter);
+ setName
+ ("InMemoryMerger - Thread to merge in-memory shuffled map-outputs");
+ setDaemon(true);
+ }
+
+ @Override
+ public void merge(List<MapOutput<K,V>> inputs) throws IOException {
+ if (inputs == null || inputs.size() == 0) {
+ return;
+ }
+
+ // Name this output file the same as the first file in the current
+ // list of in-memory files; that name is guaranteed to be absent on
+ // disk, so we don't overwrite a previously created spill. We also
+ // need to create the output file now, since it is not guaranteed to
+ // still be present after merge is called (we delete empty files as
+ // soon as we see them in the merge method).
+
+ //figure out the mapId
+ TaskAttemptID mapId = inputs.get(0).getMapId();
+ TaskID mapTaskId = mapId.getTaskID();
+
+ List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K, V>>();
+ long mergeOutputSize =
+ createInMemorySegments(inputs, inMemorySegments,0);
+ int noInMemorySegments = inMemorySegments.size();
+
+ Path outputPath =
+ mapOutputFile.getInputFileForWrite(mapTaskId,
+ mergeOutputSize).suffix(
+ Task.MERGED_OUTPUT_PREFIX);
+
+ Writer<K,V> writer =
+ new Writer<K,V>(jobConf, rfs, outputPath,
+ (Class<K>) jobConf.getMapOutputKeyClass(),
+ (Class<V>) jobConf.getMapOutputValueClass(),
+ codec, null);
+
+ RawKeyValueIterator rIter = null;
+ try {
+ LOG.info("Initiating in-memory merge with " + noInMemorySegments +
+ " segments...");
+
+ rIter = Merger.merge(jobConf, rfs,
+ (Class<K>)jobConf.getMapOutputKeyClass(),
+ (Class<V>)jobConf.getMapOutputValueClass(),
+ inMemorySegments, inMemorySegments.size(),
+ new Path(reduceId.toString()),
+ (RawComparator<K>)jobConf.getOutputKeyComparator(),
+ reporter, spilledRecordsCounter, null, null);
+
+ if (null == combinerClass) {
+ Merger.writeFile(rIter, writer, reporter, jobConf);
+ } else {
+ combineCollector.setWriter(writer);
+ combineAndSpill(rIter, reduceCombineInputCounter);
+ }
+ writer.close();
+
+ LOG.info(reduceId +
+ " Merge of the " + noInMemorySegments +
+ " files in-memory complete." +
+ " Local file is " + outputPath + " of size " +
+ localFS.getFileStatus(outputPath).getLen());
+ } catch (IOException e) {
+ //make sure that we delete the ondisk file that we created
+ //earlier when we invoked cloneFileAttributes
+ localFS.delete(outputPath, true);
+ throw e;
+ }
+
+ // Note the output of the merge
+ closeOnDiskFile(outputPath);
+ }
+
+ }
+
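+ /**
+ * Merges on-disk map outputs once their number reaches
+ * 2 * ioSortFactor - 1, keeping the final merge within io.sort.factor.
+ */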
+ private class OnDiskMerger extends MergeThread<Path,K,V> {
+
+ public OnDiskMerger(MergeManager<K, V> manager) {
+ super(manager, Integer.MAX_VALUE, exceptionReporter);
+ setName("OnDiskMerger - Thread to merge on-disk map-outputs");
+ setDaemon(true);
+ }
+
+ @Override
+ public void merge(List<Path> inputs) throws IOException {
+ // sanity check
+ if (inputs == null || inputs.isEmpty()) {
+ LOG.info("No ondisk files to merge...");
+ return;
+ }
+
+ long approxOutputSize = 0;
+ int bytesPerSum =
+ jobConf.getInt("io.bytes.per.checksum", 512);
+
+ LOG.info("OnDiskMerger: We have " + inputs.size() +
+ " map outputs on disk. Triggering merge...");
+
+ // 1. Prepare the list of files to be merged.
+ for (Path file : inputs) {
+ approxOutputSize += localFS.getFileStatus(file).getLen();
+ }
+
+ // add the checksum length
+ approxOutputSize +=
+ ChecksumFileSystem.getChecksumLength(approxOutputSize, bytesPerSum);
+
+ // 2. Start the on-disk merge process
+ Path outputPath =
+ localDirAllocator.getLocalPathForWrite(inputs.get(0).toString(),
+ approxOutputSize, jobConf).suffix(Task.MERGED_OUTPUT_PREFIX);
+ Writer<K,V> writer =
+ new Writer<K,V>(jobConf, rfs, outputPath,
+ (Class<K>) jobConf.getMapOutputKeyClass(),
+ (Class<V>) jobConf.getMapOutputValueClass(),
+ codec, null);
+ RawKeyValueIterator iter = null;
+ Path tmpDir = new Path(reduceId.toString());
+ try {
+ iter = Merger.merge(jobConf, rfs,
+ (Class<K>) jobConf.getMapOutputKeyClass(),
+ (Class<V>) jobConf.getMapOutputValueClass(),
+ codec, inputs.toArray(new Path[inputs.size()]),
+ true, ioSortFactor, tmpDir,
+ (RawComparator<K>) jobConf.getOutputKeyComparator(),
+ reporter, spilledRecordsCounter, null,
+ mergedMapOutputsCounter, null);
+
+ Merger.writeFile(iter, writer, reporter, jobConf);
+ writer.close();
+ } catch (IOException e) {
+ localFS.delete(outputPath, true);
+ throw e;
+ }
+
+ closeOnDiskFile(outputPath);
+
+ LOG.info(reduceId +
+ " Finished merging " + inputs.size() +
+ " map output files on disk of total-size " +
+ approxOutputSize + "." +
+ " Local output file is " + outputPath + " of size " +
+ localFS.getFileStatus(outputPath).getLen());
+ }
+ }
+
+ private void combineAndSpill(
+ RawKeyValueIterator kvIter,
+ Counters.Counter inCounter) throws IOException {
+ JobConf job = jobConf;
+ Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
+ Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
+ Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
+ RawComparator<K> comparator =
+ (RawComparator<K>)job.getOutputKeyComparator();
+ try {
+ CombineValuesIterator values = new CombineValuesIterator(
+ kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
+ inCounter);
+ while (values.more()) {
+ combiner.reduce(values.getKey(), values, combineCollector,
+ Reporter.NULL);
+ values.nextKey();
+ }
+ } finally {
+ combiner.close();
+ }
+ }
+
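+ /**
+ * Drains in-memory map outputs into segments until at most leaveBytes
+ * worth of data remains in memory.
+ * @return the total size of the segments created
+ */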
+ private long createInMemorySegments(List<MapOutput<K,V>> inMemoryMapOutputs,
+ List<Segment<K, V>> inMemorySegments,
+ long leaveBytes
+ ) throws IOException {
+ long totalSize = 0L;
+ // fullSize could come from the RamManager, but files can be
+ // closed but not yet present in inMemoryMapOutputs
+ long fullSize = 0L;
+ for (MapOutput<K,V> mo : inMemoryMapOutputs) {
+ fullSize += mo.getMemory().length;
+ }
+ while(fullSize > leaveBytes) {
+ MapOutput<K,V> mo = inMemoryMapOutputs.remove(0);
+ byte[] data = mo.getMemory();
+ long size = data.length;
+ totalSize += size;
+ fullSize -= size;
+ Reader<K,V> reader = new InMemoryReader<K,V>(MergeManager.this,
+ mo.getMapId(),
+ data, 0, (int)size);
+ inMemorySegments.add(new Segment<K,V>(reader, true,
+ (mo.isPrimaryMapOutput() ?
+ mergedMapOutputsCounter : null)));
+ }
+ return totalSize;
+ }
+
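+ /**
+ * Adapts a RawKeyValueIterator (the result of an intermediate merge) to
+ * the IFile.Reader interface so it can be wrapped in a final Segment.
+ */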
+ class RawKVIteratorReader extends IFile.Reader<K,V> {
+
+ private final RawKeyValueIterator kvIter;
+
+ public RawKVIteratorReader(RawKeyValueIterator kvIter, long size)
+ throws IOException {
+ super(null, null, size, null, spilledRecordsCounter);
+ this.kvIter = kvIter;
+ }
+ public boolean nextRawKey(DataInputBuffer key) throws IOException {
+ if (kvIter.next()) {
+ final DataInputBuffer kb = kvIter.getKey();
+ final int kp = kb.getPosition();
+ final int klen = kb.getLength() - kp;
+ key.reset(kb.getData(), kp, klen);
+ bytesRead += klen;
+ return true;
+ }
+ return false;
+ }
+ public void nextRawValue(DataInputBuffer value) throws IOException {
+ final DataInputBuffer vb = kvIter.getValue();
+ final int vp = vb.getPosition();
+ final int vlen = vb.getLength() - vp;
+ value.reset(vb.getData(), vp, vlen);
+ bytesRead += vlen;
+ }
+ public long getPosition() throws IOException {
+ return bytesRead;
+ }
+
+ public void close() throws IOException {
+ kvIter.close();
+ }
+ }
+
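+ /**
+ * Produces the final sorted input for the reduce: spills enough in-memory
+ * outputs to disk to satisfy the reduce-input-buffer limit, merges the
+ * on-disk segments, and feeds the result, together with any segments kept
+ * in memory, to a last Merger.merge() pass.
+ */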
+ private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs,
+ List<MapOutput<K,V>> inMemoryMapOutputs,
+ List<Path> onDiskMapOutputs
+ ) throws IOException {
+ LOG.info("finalMerge called with " +
+ inMemoryMapOutputs.size() + " in-memory map-outputs and " +
+ onDiskMapOutputs.size() + " on-disk map-outputs");
+
+ final float maxRedPer =
+ job.getFloat(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT, 0f);
+ if (maxRedPer > 1.0 || maxRedPer < 0.0) {
+ throw new IOException("Invalid value for " +
+ MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT + ": " + maxRedPer);
+ }
+ int maxInMemReduce = (int)Math.min(
+ Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE);
+
+
+ // merge config params
+ Class<K> keyClass = (Class<K>)job.getMapOutputKeyClass();
+ Class<V> valueClass = (Class<V>)job.getMapOutputValueClass();
+ boolean keepInputs = job.getKeepFailedTaskFiles();
+ final Path tmpDir = new Path(reduceId.toString());
+ final RawComparator<K> comparator =
+ (RawComparator<K>)job.getOutputKeyComparator();
+
+ // segments required to vacate memory
+ List<Segment<K,V>> memDiskSegments = new ArrayList<Segment<K,V>>();
+ long inMemToDiskBytes = 0;
+ boolean mergePhaseFinished = false;
+ if (inMemoryMapOutputs.size() > 0) {
+ TaskID mapId = inMemoryMapOutputs.get(0).getMapId().getTaskID();
+ inMemToDiskBytes = createInMemorySegments(inMemoryMapOutputs,
+ memDiskSegments,
+ maxInMemReduce);
+ final int numMemDiskSegments = memDiskSegments.size();
+ if (numMemDiskSegments > 0 &&
+ ioSortFactor > onDiskMapOutputs.size()) {
+
+ // If we reach here, it implies that we have less than io.sort.factor
+ // disk segments and this will be incremented by 1 (result of the
+ // memory segments merge). Since this total would still be
+ // <= io.sort.factor, we will not do any more intermediate merges,
+ // the merge of all these disk segments would be directly fed to the
+ // reduce method
+
+ mergePhaseFinished = true;
+ // must spill to disk, but can't retain in-mem for intermediate merge
+ final Path outputPath =
+ mapOutputFile.getInputFileForWrite(mapId,
+ inMemToDiskBytes).suffix(
+ Task.MERGED_OUTPUT_PREFIX);
+ final RawKeyValueIterator rIter = Merger.merge(job, fs,
+ keyClass, valueClass, memDiskSegments, numMemDiskSegments,
+ tmpDir, comparator, reporter, spilledRecordsCounter, null,
+ mergePhase);
+ final Writer<K,V> writer = new Writer<K,V>(job, fs, outputPath,
+ keyClass, valueClass, codec, null);
+ try {
+ Merger.writeFile(rIter, writer, reporter, job);
+ // add to list of final disk outputs.
+ onDiskMapOutputs.add(outputPath);
+ } catch (IOException e) {
+ if (null != outputPath) {
+ try {
+ fs.delete(outputPath, true);
+ } catch (IOException ie) {
+ // NOTHING
+ }
+ }
+ throw e;
+ } finally {
+ if (null != writer) {
+ writer.close();
+ }
+ }
+ LOG.info("Merged " + numMemDiskSegments + " segments, " +
+ inMemToDiskBytes + " bytes to disk to satisfy " +
+ "reduce memory limit");
+ inMemToDiskBytes = 0;
+ memDiskSegments.clear();
+ } else if (inMemToDiskBytes != 0) {
+ LOG.info("Keeping " + numMemDiskSegments + " segments, " +
+ inMemToDiskBytes + " bytes in memory for " +
+ "intermediate, on-disk merge");
+ }
+ }
+
+ // segments on disk
+ List<Segment<K,V>> diskSegments = new ArrayList<Segment<K,V>>();
+ long onDiskBytes = inMemToDiskBytes;
+ Path[] onDisk = onDiskMapOutputs.toArray(new Path[onDiskMapOutputs.size()]);
+ for (Path file : onDisk) {
+ onDiskBytes += fs.getFileStatus(file).getLen();
+ LOG.debug("Disk file: " + file + " Length is " +
+ fs.getFileStatus(file).getLen());
+ diskSegments.add(new Segment<K, V>(job, fs, file, codec, keepInputs,
+ (file.toString().endsWith(
+ Task.MERGED_OUTPUT_PREFIX) ?
+ null : mergedMapOutputsCounter)
+ ));
+ }
+ LOG.info("Merging " + onDisk.length + " files, " +
+ onDiskBytes + " bytes from disk");
+ Collections.sort(diskSegments, new Comparator<Segment<K,V>>() {
+ public int compare(Segment<K, V> o1, Segment<K, V> o2) {
+ if (o1.getLength() == o2.getLength()) {
+ return 0;
+ }
+ return o1.getLength() < o2.getLength() ? -1 : 1;
+ }
+ });
+
+ // build final list of segments from merged backed by disk + in-mem
+ List<Segment<K,V>> finalSegments = new ArrayList<Segment<K,V>>();
+ long inMemBytes = createInMemorySegments(inMemoryMapOutputs,
+ finalSegments, 0);
+ LOG.info("Merging " + finalSegments.size() + " segments, " +
+ inMemBytes + " bytes from memory into reduce");
+ if (0 != onDiskBytes) {
+ final int numInMemSegments = memDiskSegments.size();
+ diskSegments.addAll(0, memDiskSegments);
+ memDiskSegments.clear();
+ // Pass mergePhase only if there are going to be intermediate
+ // merges. See the comment where mergePhaseFinished is set.
+ Progress thisPhase = (mergePhaseFinished) ? null : mergePhase;
+ RawKeyValueIterator diskMerge = Merger.merge(
+ job, fs, keyClass, valueClass, diskSegments,
+ ioSortFactor, numInMemSegments, tmpDir, comparator,
+ reporter, false, spilledRecordsCounter, null, thisPhase);
+ diskSegments.clear();
+ if (0 == finalSegments.size()) {
+ return diskMerge;
+ }
+ finalSegments.add(new Segment<K,V>(
+ new RawKVIteratorReader(diskMerge, onDiskBytes), true));
+ }
+ return Merger.merge(job, fs, keyClass, valueClass,
+ finalSegments, finalSegments.size(), tmpDir,
+ comparator, reporter, spilledRecordsCounter, null,
+ null);
+ }
+}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleHeader.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleHeader.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleHeader.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleHeader.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/tools/CLI.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/tools/CLI.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/CountersStrings.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java
new file mode 100644
index 0000000..772d85d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java
@@ -0,0 +1,33 @@
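+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */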
+package org.apache.hadoop.mapreduce.util;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+@Private
+@Unstable
+public class HostUtil {
+
+ /**
+ * Construct the task log URL for a task attempt.
+ * @param taskTrackerHostName host name of the tasktracker
+ * @param httpPort HTTP port of the tasktracker
+ * @param taskAttemptID the task attempt whose logs are requested
+ * @return the taskLogUrl
+ */
+ public static String getTaskLogUrl(String taskTrackerHostName,
+ String httpPort, String taskAttemptID) {
+ return ("http://" + taskTrackerHostName + ":" + httpPort
+ + "/tasklog?attemptid=" + taskAttemptID);
+ }
+
+ public static String convertTrackerNameToHostName(String trackerName) {
+ // Ugly!
+ // Convert the trackerName to its host name
+ int indexOfColon = trackerName.indexOf(":");
+ String trackerHostName = (indexOfColon == -1) ?
+ trackerName :
+ trackerName.substring(0, indexOfColon);
+ return trackerHostName.substring("tracker_".length());
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/LinuxResourceCalculatorPlugin.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/LinuxResourceCalculatorPlugin.java
new file mode 100644
index 0000000..280b7b6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/LinuxResourceCalculatorPlugin.java
@@ -0,0 +1,412 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.util;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Plugin to calculate resource information on Linux systems.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+ private static final Log LOG =
+ LogFactory.getLog(LinuxResourceCalculatorPlugin.class);
+
+ public static final int UNAVAILABLE = -1;
+
+ /**
+ * proc's meminfo virtual file has key-value pairs in the format
+ * "key:[ \t]*value[ \t]kB".
+ */
+ private static final String PROCFS_MEMFILE = "/proc/meminfo";
+ private static final Pattern PROCFS_MEMFILE_FORMAT =
+ Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");
+
+ // We need the values for the following keys in meminfo
+ private static final String MEMTOTAL_STRING = "MemTotal";
+ private static final String SWAPTOTAL_STRING = "SwapTotal";
+ private static final String MEMFREE_STRING = "MemFree";
+ private static final String SWAPFREE_STRING = "SwapFree";
+ private static final String INACTIVE_STRING = "Inactive";
+
+ /**
+ * Patterns for parsing /proc/cpuinfo
+ */
+ private static final String PROCFS_CPUINFO = "/proc/cpuinfo";
+ private static final Pattern PROCESSOR_FORMAT =
+ Pattern.compile("^processor[ \t]:[ \t]*([0-9]*)");
+ private static final Pattern FREQUENCY_FORMAT =
+ Pattern.compile("^cpu MHz[ \t]*:[ \t]*([0-9.]*)");
+
+ /**
+ * Pattern for parsing /proc/stat
+ */
+ private static final String PROCFS_STAT = "/proc/stat";
+ private static final Pattern CPU_TIME_FORMAT =
+ Pattern.compile("^cpu[ \t]*([0-9]*)" +
+ "[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*");
+
+ private String procfsMemFile;
+ private String procfsCpuFile;
+ private String procfsStatFile;
+ long jiffyLengthInMillis;
+
+ private long ramSize = 0;
+ private long swapSize = 0;
+ private long ramSizeFree = 0; // free ram space on the machine (kB)
+ private long swapSizeFree = 0; // free swap space on the machine (kB)
+ private long inactiveSize = 0; // inactive cache memory (kB)
+ private int numProcessors = 0; // number of processors on the system
+ private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
+ private long cumulativeCpuTime = 0L; // CPU used time since system is on (ms)
+ private long lastCumulativeCpuTime = 0L; // CPU used time read last time (ms)
+ private float cpuUsage = UNAVAILABLE; // CPU usage (%) computed from the last sample
+ // Unix timestamps while reading the CPU time (ms)
+ private long sampleTime = UNAVAILABLE;
+ private long lastSampleTime = UNAVAILABLE;
+ private ProcfsBasedProcessTree pTree = null;
+
+ boolean readMemInfoFile = false;
+ boolean readCpuInfoFile = false;
+
+ /**
+ * Get current time
+ * @return Unix time stamp in milliseconds
+ */
+ long getCurrentTime() {
+ return System.currentTimeMillis();
+ }
+
+ public LinuxResourceCalculatorPlugin() {
+ procfsMemFile = PROCFS_MEMFILE;
+ procfsCpuFile = PROCFS_CPUINFO;
+ procfsStatFile = PROCFS_STAT;
+ jiffyLengthInMillis = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS;
+ String pid = System.getenv().get("JVM_PID");
+ pTree = new ProcfsBasedProcessTree(pid);
+ }
+
+ /**
+ * Constructor which allows assigning the /proc/ file locations. This will be
+ * used only in unit tests
+ * @param procfsMemFile fake file for /proc/meminfo
+ * @param procfsCpuFile fake file for /proc/cpuinfo
+ * @param procfsStatFile fake file for /proc/stat
+ * @param jiffyLengthInMillis fake jiffy length value
+ */
+ public LinuxResourceCalculatorPlugin(String procfsMemFile,
+ String procfsCpuFile,
+ String procfsStatFile,
+ long jiffyLengthInMillis) {
+ this.procfsMemFile = procfsMemFile;
+ this.procfsCpuFile = procfsCpuFile;
+ this.procfsStatFile = procfsStatFile;
+ this.jiffyLengthInMillis = jiffyLengthInMillis;
+ String pid = System.getenv().get("JVM_PID");
+ pTree = new ProcfsBasedProcessTree(pid);
+ }
+
+ /**
+ * Read /proc/meminfo, parse and compute memory information only once
+ */
+ private void readProcMemInfoFile() {
+ readProcMemInfoFile(false);
+ }
+
+ /**
+ * Read /proc/meminfo, parse and compute memory information
+ * @param readAgain if false, read only on the first time
+ */
+ private void readProcMemInfoFile(boolean readAgain) {
+
+ if (readMemInfoFile && !readAgain) {
+ return;
+ }
+
+ // Read "/proc/memInfo" file
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader = new FileReader(procfsMemFile);
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+
+ Matcher mat = null;
+
+ try {
+ String str = in.readLine();
+ while (str != null) {
+ mat = PROCFS_MEMFILE_FORMAT.matcher(str);
+ if (mat.find()) {
+ if (mat.group(1).equals(MEMTOTAL_STRING)) {
+ ramSize = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
+ swapSize = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(MEMFREE_STRING)) {
+ ramSizeFree = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(SWAPFREE_STRING)) {
+ swapSizeFree = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(INACTIVE_STRING)) {
+ inactiveSize = Long.parseLong(mat.group(2));
+ }
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+
+ readMemInfoFile = true;
+ }
+
+ /**
+ * Read /proc/cpuinfo, parse and calculate CPU information
+ */
+ private void readProcCpuInfoFile() {
+ // This file needs to be read only once
+ if (readCpuInfoFile) {
+ return;
+ }
+ // Read "/proc/cpuinfo" file
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader = new FileReader(procfsCpuFile);
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+ Matcher mat = null;
+ try {
+ numProcessors = 0;
+ String str = in.readLine();
+ while (str != null) {
+ mat = PROCESSOR_FORMAT.matcher(str);
+ if (mat.find()) {
+ numProcessors++;
+ }
+ mat = FREQUENCY_FORMAT.matcher(str);
+ if (mat.find()) {
+ cpuFrequency = (long)(Double.parseDouble(mat.group(1)) * 1000); // kHz
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+ readCpuInfoFile = true;
+ }
+
+ /**
+ * Read /proc/stat file, parse and calculate cumulative CPU
+ */
+ private void readProcStatFile() {
+ // Read "/proc/stat" file
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader = new FileReader(procfsStatFile);
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+
+ Matcher mat = null;
+ try {
+ String str = in.readLine();
+ while (str != null) {
+ mat = CPU_TIME_FORMAT.matcher(str);
+ if (mat.find()) {
+ long uTime = Long.parseLong(mat.group(1));
+ long nTime = Long.parseLong(mat.group(2));
+ long sTime = Long.parseLong(mat.group(3));
+ cumulativeCpuTime = uTime + nTime + sTime; // in jiffies; converted to ms below
+ break;
+ }
+ str = in.readLine();
+ }
+ cumulativeCpuTime *= jiffyLengthInMillis;
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getPhysicalMemorySize() {
+ readProcMemInfoFile();
+ return ramSize * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getVirtualMemorySize() {
+ readProcMemInfoFile();
+ return (ramSize + swapSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailablePhysicalMemorySize() {
+ readProcMemInfoFile(true);
+ return (ramSizeFree + inactiveSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailableVirtualMemorySize() {
+ readProcMemInfoFile(true);
+ return (ramSizeFree + swapSizeFree + inactiveSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getNumProcessors() {
+ readProcCpuInfoFile();
+ return numProcessors;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCpuFrequency() {
+ readProcCpuInfoFile();
+ return cpuFrequency;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCumulativeCpuTime() {
+ readProcStatFile();
+ return cumulativeCpuTime;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public float getCpuUsage() {
+ readProcStatFile();
+ sampleTime = getCurrentTime();
+ if (lastSampleTime == UNAVAILABLE ||
+ lastSampleTime > sampleTime) {
+ // lastSampleTime > sampleTime may happen when the system time is changed
+ lastSampleTime = sampleTime;
+ lastCumulativeCpuTime = cumulativeCpuTime;
+ return cpuUsage;
+ }
+ // When lastSampleTime is sufficiently old, update cpuUsage.
+ // Also take a sample of the current time and cumulative CPU time for the
+ // use of the next calculation.
+ final long MINIMUM_UPDATE_INTERVAL = 10 * jiffyLengthInMillis;
+ if (sampleTime > lastSampleTime + MINIMUM_UPDATE_INTERVAL) {
+ cpuUsage = (float)(cumulativeCpuTime - lastCumulativeCpuTime) * 100F /
+ ((float)(sampleTime - lastSampleTime) * getNumProcessors());
+ lastSampleTime = sampleTime;
+ lastCumulativeCpuTime = cumulativeCpuTime;
+ }
+ return cpuUsage;
+ }
+
+ /**
+ * Test the {@link LinuxResourceCalculatorPlugin}
+ *
+ * @param args command-line arguments (unused)
+ */
+ public static void main(String[] args) {
+ LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
+ System.out.println("Physical memory Size (bytes) : "
+ + plugin.getPhysicalMemorySize());
+ System.out.println("Total Virtual memory Size (bytes) : "
+ + plugin.getVirtualMemorySize());
+ System.out.println("Available Physical memory Size (bytes) : "
+ + plugin.getAvailablePhysicalMemorySize());
+ System.out.println("Total Available Virtual memory Size (bytes) : "
+ + plugin.getAvailableVirtualMemorySize());
+ System.out.println("Number of Processors : " + plugin.getNumProcessors());
+ System.out.println("CPU frequency (kHz) : " + plugin.getCpuFrequency());
+ System.out.println("Cumulative CPU time (ms) : " +
+ plugin.getCumulativeCpuTime());
+ try {
+ // Sleep so we can compute the CPU usage
+ Thread.sleep(500L);
+ } catch (InterruptedException e) {
+ // do nothing
+ }
+ System.out.println("CPU usage % : " + plugin.getCpuUsage());
+ }
+
+ @Override
+ public ProcResourceValues getProcResourceValues() {
+ pTree = pTree.getProcessTree();
+ long cpuTime = pTree.getCumulativeCpuTime();
+ long pMem = pTree.getCumulativeRssmem();
+ long vMem = pTree.getCumulativeVmem();
+ return new ProcResourceValues(cpuTime, pMem, vMem);
+ }
+}
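A note on the CPU-usage calculation above: getCpuUsage() divides the growth in cumulative CPU time by the elapsed wall-clock time across all processors, and only recomputes once at least MINIMUM_UPDATE_INTERVAL (ten jiffies) has passed. A minimal sketch of the arithmetic, using made-up sample values rather than anything from this patch:

    // deltaCpuMs: growth of cumulativeCpuTime between two samples (ms, summed over all CPUs)
    // deltaWallMs: wall-clock time between the samples (ms)
    long deltaCpuMs = 1000L;
    long deltaWallMs = 2000L;
    int numProcessors = 8;
    float cpuUsage = (float) deltaCpuMs * 100F / ((float) deltaWallMs * numProcessors);
    // -> 6.25, i.e. the machine was about 6.25% busy over the interval

Callers who sample faster than the minimum interval simply keep seeing the previously computed value.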
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/ProcessTree.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/ProcessTree.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/ProcfsBasedProcessTree.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcfsBasedProcessTree.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/ProcfsBasedProcessTree.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcfsBasedProcessTree.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/ResourceBundles.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ResourceBundles.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/ResourceBundles.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ResourceBundles.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/ResourceCalculatorPlugin.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ResourceCalculatorPlugin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/ResourceCalculatorPlugin.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ResourceCalculatorPlugin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/package-info.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/package-info.java
rename to hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/package-info.java
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/util/TestLinuxResourceCalculatorPlugin.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/util/TestLinuxResourceCalculatorPlugin.java
new file mode 100644
index 0000000..413e615
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/util/TestLinuxResourceCalculatorPlugin.java
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.util;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.util.LinuxResourceCalculatorPlugin;
+import org.junit.Test;
+
+/**
+ * A JUnit test for {@link LinuxResourceCalculatorPlugin}.
+ * Creates fake /proc/ information and verifies the parsing and calculation.
+ */
+public class TestLinuxResourceCalculatorPlugin extends TestCase {
+ /**
+ * LinuxResourceCalculatorPlugin with a fake timer
+ */
+ static class FakeLinuxResourceCalculatorPlugin extends
+ LinuxResourceCalculatorPlugin {
+
+ long currentTime = 0;
+ public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
+ String procfsCpuFile,
+ String procfsStatFile,
+ long jiffyLengthInMillis) {
+ super(procfsMemFile, procfsCpuFile, procfsStatFile, jiffyLengthInMillis);
+ }
+ @Override
+ long getCurrentTime() {
+ return currentTime;
+ }
+ public void advanceTime(long adv) {
+ currentTime += adv * jiffyLengthInMillis;
+ }
+ }
+ private static final FakeLinuxResourceCalculatorPlugin plugin;
+ private static String TEST_ROOT_DIR = new Path(System.getProperty(
+ "test.build.data", "/tmp")).toString().replace(' ', '+');
+ private static final String FAKE_MEMFILE;
+ private static final String FAKE_CPUFILE;
+ private static final String FAKE_STATFILE;
+ private static final long FAKE_JIFFY_LENGTH = 10L;
+ static {
+ int randomNum = (new Random()).nextInt(1000000000);
+ FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
+ FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
+ FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
+ plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
+ FAKE_STATFILE,
+ FAKE_JIFFY_LENGTH);
+ }
+ static final String MEMINFO_FORMAT =
+ "MemTotal: %d kB\n" +
+ "MemFree: %d kB\n" +
+ "Buffers: 138244 kB\n" +
+ "Cached: 947780 kB\n" +
+ "SwapCached: 142880 kB\n" +
+ "Active: 3229888 kB\n" +
+ "Inactive: %d kB\n" +
+ "SwapTotal: %d kB\n" +
+ "SwapFree: %d kB\n" +
+ "Dirty: 122012 kB\n" +
+ "Writeback: 0 kB\n" +
+ "AnonPages: 2710792 kB\n" +
+ "Mapped: 24740 kB\n" +
+ "Slab: 132528 kB\n" +
+ "SReclaimable: 105096 kB\n" +
+ "SUnreclaim: 27432 kB\n" +
+ "PageTables: 11448 kB\n" +
+ "NFS_Unstable: 0 kB\n" +
+ "Bounce: 0 kB\n" +
+ "CommitLimit: 4125904 kB\n" +
+ "Committed_AS: 4143556 kB\n" +
+ "VmallocTotal: 34359738367 kB\n" +
+ "VmallocUsed: 1632 kB\n" +
+ "VmallocChunk: 34359736375 kB\n" +
+ "HugePages_Total: 0\n" +
+ "HugePages_Free: 0\n" +
+ "HugePages_Rsvd: 0\n" +
+ "Hugepagesize: 2048 kB";
+
+ static final String CPUINFO_FORMAT =
+ "processor : %s\n" +
+ "vendor_id : AuthenticAMD\n" +
+ "cpu family : 15\n" +
+ "model : 33\n" +
+ "model name : Dual Core AMD Opteron(tm) Processor 280\n" +
+ "stepping : 2\n" +
+ "cpu MHz : %f\n" +
+ "cache size : 1024 KB\n" +
+ "physical id : 0\n" +
+ "siblings : 2\n" +
+ "core id : 0\n" +
+ "cpu cores : 2\n" +
+ "fpu : yes\n" +
+ "fpu_exception : yes\n" +
+ "cpuid level : 1\n" +
+ "wp : yes\n" +
+ "flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov " +
+ "pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt lm " +
+ "3dnowext 3dnow pni lahf_lm cmp_legacy\n" +
+ "bogomips : 4792.41\n" +
+ "TLB size : 1024 4K pages\n" +
+ "clflush size : 64\n" +
+ "cache_alignment : 64\n" +
+ "address sizes : 40 bits physical, 48 bits virtual\n" +
+ "power management: ts fid vid ttp";
+
+ static final String STAT_FILE_FORMAT =
+ "cpu %d %d %d 1646495089 831319 48713 164346 0\n" +
+ "cpu0 15096055 30805 3823005 411456015 206027 13 14269 0\n" +
+ "cpu1 14760561 89890 6432036 408707910 456857 48074 130857 0\n" +
+ "cpu2 12761169 20842 3758639 413976772 98028 411 10288 0\n" +
+ "cpu3 12355207 47322 5789691 412354390 70406 213 8931 0\n" +
+ "intr 114648668 20010764 2 0 945665 2 0 0 0 0 0 0 0 4 0 0 0 0 0 0\n" +
+ "ctxt 242017731764\n" +
+ "btime 1257808753\n" +
+ "processes 26414943\n" +
+ "procs_running 1\n" +
+ "procs_blocked 0\n";
+
+ /**
+ * Test parsing /proc/stat and /proc/cpuinfo
+ * @throws IOException
+ */
+ @Test
+ public void testParsingProcStatAndCpuFile() throws IOException {
+ // Write fake /proc/cpuinfo file.
+ long numProcessors = 8;
+ long cpuFrequencyKHz = 2392781;
+ String fileContent = "";
+ for (int i = 0; i < numProcessors; i++) {
+ fileContent += String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D) +
+ "\n";
+ }
+ File tempFile = new File(FAKE_CPUFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
+ fWriter.write(fileContent);
+ fWriter.close();
+ assertEquals(plugin.getNumProcessors(), numProcessors);
+ assertEquals(plugin.getCpuFrequency(), cpuFrequencyKHz);
+
+ // Write fake /proc/stat file.
+ long uTime = 54972994;
+ long nTime = 188860;
+ long sTime = 19803373;
+ tempFile = new File(FAKE_STATFILE);
+ tempFile.deleteOnExit();
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), (float)(LinuxResourceCalculatorPlugin.UNAVAILABLE));
+
+ // Advance the time and sample again to test the CPU usage calculation
+ uTime += 100L;
+ plugin.advanceTime(200L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), 6.25F);
+
+ // Advance the time and sample again. This time, we call getCpuUsage() only.
+ uTime += 600L;
+ plugin.advanceTime(300L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCpuUsage(), 25F);
+
+ // Advance very short period of time (one jiffy length).
+ // In this case, CPU usage should not be updated.
+ uTime += 1L;
+ plugin.advanceTime(1L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), 25F); // CPU usage is not updated.
+ }
+
+ /**
+ * Write information to fake /proc/stat file
+ */
+ private void updateStatFile(long uTime, long nTime, long sTime)
+ throws IOException {
+ FileWriter fWriter = new FileWriter(FAKE_STATFILE);
+ fWriter.write(String.format(STAT_FILE_FORMAT, uTime, nTime, sTime));
+ fWriter.close();
+ }
+
+ /**
+ * Test parsing /proc/meminfo
+ * @throws IOException
+ */
+ @Test
+ public void testParsingProcMemFile() throws IOException {
+ long memTotal = 4058864L;
+ long memFree = 99632L;
+ long inactive = 567732L;
+ long swapTotal = 2096472L;
+ long swapFree = 1818480L;
+ File tempFile = new File(FAKE_MEMFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
+ fWriter.write(String.format(MEMINFO_FORMAT,
+ memTotal, memFree, inactive, swapTotal, swapFree));
+
+ fWriter.close();
+ assertEquals(plugin.getAvailablePhysicalMemorySize(),
+ 1024L * (memFree + inactive));
+ assertEquals(plugin.getAvailableVirtualMemorySize(),
+ 1024L * (memFree + inactive + swapFree));
+ assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
+ assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
+ }
+}
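The expected usages in testParsingProcStatAndCpuFile follow directly from the formula sketched earlier, with FAKE_JIFFY_LENGTH = 10 ms and 8 processors:

    second sample: deltaCpu = 100 jiffies * 10 ms = 1000 ms, deltaWall = 200 * 10 ms = 2000 ms,
                   usage = 1000 * 100 / (2000 * 8) = 6.25
    third sample:  deltaCpu = 600 * 10 = 6000 ms, deltaWall = 300 * 10 = 3000 ms,
                   usage = 6000 * 100 / (3000 * 8) = 25
    fourth sample: deltaWall = 1 jiffy, below the ten-jiffy minimum interval,
                   so the cached value of 25 is returned unchanged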
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/pom.xml
new file mode 100644
index 0000000..785ad6a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/pom.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-mapreduce-client</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${hadoop-mapreduce.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-hs</artifactId>
+ <name>hadoop-mapreduce-client-hs</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <mr.basedir>${project.parent.parent.basedir}</mr.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-app</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-app</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <excludes>
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
new file mode 100644
index 0000000..abb9978
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
@@ -0,0 +1,300 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobACLsManager;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+
+/**
+ * Loads the basic job level data upfront.
+ * Data from job history file is loaded lazily.
+ */
+public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
+
+ static final Log LOG = LogFactory.getLog(CompletedJob.class);
+ private final Counters counters;
+ private final Configuration conf;
+ private final JobId jobId;
+ private final List<String> diagnostics = new ArrayList<String>();
+ private final JobReport report;
+ private final Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
+ private final Map<TaskId, Task> mapTasks = new HashMap<TaskId, Task>();
+ private final Map<TaskId, Task> reduceTasks = new HashMap<TaskId, Task>();
+
+ private List<TaskAttemptCompletionEvent> completionEvents = null;
+ private JobInfo jobInfo;
+
+ public CompletedJob(Configuration conf, JobId jobId, Path historyFile, boolean loadTasks) throws IOException {
+ LOG.info("Loading job: " + jobId + " from file: " + historyFile);
+ this.conf = conf;
+ this.jobId = jobId;
+
+ loadFullHistoryData(loadTasks, historyFile);
+
+ counters = TypeConverter.toYarn(jobInfo.getTotalCounters());
+ diagnostics.add(jobInfo.getErrorInfo());
+ report = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobReport.class);
+ report.setJobId(jobId);
+ report.setJobState(JobState.valueOf(jobInfo.getJobStatus()));
+ report.setStartTime(jobInfo.getLaunchTime());
+ report.setFinishTime(jobInfo.getFinishTime());
+ //TODO Possibly populate job progress. Never used.
+ //report.setMapProgress(progress)
+ //report.setReduceProgress(progress)
+ }
+
+ @Override
+ public int getCompletedMaps() {
+ return (int) jobInfo.getFinishedMaps();
+ }
+
+ @Override
+ public int getCompletedReduces() {
+ return (int) jobInfo.getFinishedReduces();
+ }
+
+ @Override
+ public Counters getCounters() {
+ return counters;
+ }
+
+ @Override
+ public JobId getID() {
+ return jobId;
+ }
+
+ @Override
+ public JobReport getReport() {
+ return report;
+ }
+
+ @Override
+ public JobState getState() {
+ return report.getJobState();
+ }
+
+ @Override
+ public Task getTask(TaskId taskId) {
+ return tasks.get(taskId);
+ }
+
+ @Override
+ public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
+ int fromEventId, int maxEvents) {
+ if (completionEvents == null) {
+ constructTaskAttemptCompletionEvents();
+ }
+ TaskAttemptCompletionEvent[] events = new TaskAttemptCompletionEvent[0];
+ if (completionEvents.size() > fromEventId) {
+ int actualMax = Math.min(maxEvents,
+ (completionEvents.size() - fromEventId));
+ events = completionEvents.subList(fromEventId, actualMax + fromEventId)
+ .toArray(events);
+ }
+ return events;
+ }
+
+ private void constructTaskAttemptCompletionEvents() {
+ completionEvents = new LinkedList<TaskAttemptCompletionEvent>();
+ List<TaskAttempt> allTaskAttempts = new LinkedList<TaskAttempt>();
+ for (TaskId taskId : tasks.keySet()) {
+ Task task = tasks.get(taskId);
+ for (TaskAttemptId taskAttemptId : task.getAttempts().keySet()) {
+ TaskAttempt taskAttempt = task.getAttempts().get(taskAttemptId);
+ allTaskAttempts.add(taskAttempt);
+ }
+ }
+ Collections.sort(allTaskAttempts, new Comparator<TaskAttempt>() {
+
+ @Override
+ public int compare(TaskAttempt o1, TaskAttempt o2) {
+ if (o1.getFinishTime() == 0 || o2.getFinishTime() == 0) {
+ if (o1.getFinishTime() == 0 && o2.getFinishTime() == 0) {
+ if (o1.getLaunchTime() == 0 || o2.getLaunchTime() == 0) {
+ if (o1.getLaunchTime() == 0 && o2.getLaunchTime() == 0) {
+ return 0;
+ } else {
+ long res = o1.getLaunchTime() - o2.getLaunchTime();
+ return res > 0 ? -1 : 1;
+ }
+ } else {
+ return (int) (o1.getLaunchTime() - o2.getLaunchTime());
+ }
+ } else {
+ long res = o1.getFinishTime() - o2.getFinishTime();
+ return res > 0 ? -1 : 1;
+ }
+ } else {
+ return (int) (o1.getFinishTime() - o2.getFinishTime());
+ }
+ }
+ });
+
+ int eventId = 0;
+ for (TaskAttempt taskAttempt : allTaskAttempts) {
+
+ TaskAttemptCompletionEvent tace = RecordFactoryProvider.getRecordFactory(
+ null).newRecordInstance(TaskAttemptCompletionEvent.class);
+
+ int attemptRunTime = -1;
+ if (taskAttempt.getLaunchTime() != 0 && taskAttempt.getFinishTime() != 0) {
+ attemptRunTime = (int) (taskAttempt.getFinishTime() - taskAttempt
+ .getLaunchTime());
+ }
+ // Default to KILLED
+ TaskAttemptCompletionEventStatus taceStatus = TaskAttemptCompletionEventStatus.KILLED;
+ String taStateString = taskAttempt.getState().toString();
+ try {
+ taceStatus = TaskAttemptCompletionEventStatus.valueOf(taStateString);
+ } catch (Exception e) {
+ LOG.warn("Cannot constuct TACEStatus from TaskAtemptState: ["
+ + taStateString + "] for taskAttemptId: [" + taskAttempt.getID()
+ + "]. Defaulting to KILLED");
+ }
+
+ tace.setAttemptId(taskAttempt.getID());
+ tace.setAttemptRunTime(attemptRunTime);
+ tace.setEventId(eventId++);
+ tace.setMapOutputServerAddress(taskAttempt
+ .getAssignedContainerMgrAddress());
+ tace.setStatus(taceStatus);
+ completionEvents.add(tace);
+ }
+ }
+
+ @Override
+ public Map<TaskId, Task> getTasks() {
+ return tasks;
+ }
+
+ //History data is lazily loaded when task-level data is requested
+ private synchronized void loadFullHistoryData(boolean loadTasks, Path historyFileAbsolute) throws IOException {
+ LOG.info("Loading history file: [" + historyFileAbsolute + "]");
+ if (jobInfo != null) {
+ return; //data already loaded
+ }
+
+ if (historyFileAbsolute != null) {
+ try {
+ JobHistoryParser parser = new JobHistoryParser(historyFileAbsolute.getFileSystem(conf), historyFileAbsolute);
+ jobInfo = parser.parse();
+ } catch (IOException e) {
+ throw new YarnException("Could not load history file " + historyFileAbsolute,
+ e);
+ }
+ } else {
+ throw new IOException("History file not found");
+ }
+
+ if (loadTasks) {
+ for (Map.Entry<org.apache.hadoop.mapreduce.TaskID, TaskInfo> entry : jobInfo
+ .getAllTasks().entrySet()) {
+ TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
+ TaskInfo taskInfo = entry.getValue();
+ Task task = new CompletedTask(yarnTaskID, taskInfo);
+ tasks.put(yarnTaskID, task);
+ if (task.getType() == TaskType.MAP) {
+ mapTasks.put(task.getID(), task);
+ } else if (task.getType() == TaskType.REDUCE) {
+ reduceTasks.put(task.getID(), task);
+ }
+ }
+ }
+ LOG.info("TaskInfo loaded");
+ }
+
+ @Override
+ public List<String> getDiagnostics() {
+ return diagnostics;
+ }
+
+ @Override
+ public String getName() {
+ return jobInfo.getJobname();
+ }
+
+ @Override
+ public int getTotalMaps() {
+ return (int) jobInfo.getTotalMaps();
+ }
+
+ @Override
+ public int getTotalReduces() {
+ return (int) jobInfo.getTotalReduces();
+ }
+
+ @Override
+ public boolean isUber() {
+ throw new YarnException("Not yet implemented!");
+ }
+
+ @Override
+ public Map<TaskId, Task> getTasks(TaskType taskType) {
+ if (TaskType.MAP.equals(taskType)) {
+ return mapTasks;
+ } else { // we have only two types of tasks
+ return reduceTasks;
+ }
+ }
+
+ @Override
+ public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
+ if (!UserGroupInformation.isSecurityEnabled()) {
+ return true;
+ }
+ Map<JobACL, AccessControlList> jobACLs = jobInfo.getJobACLs();
+ AccessControlList jobACL = jobACLs.get(jobOperation);
+ JobACLsManager aclsMgr = new JobACLsManager(conf);
+ return aclsMgr.checkAccess(callerUGI, jobOperation,
+ jobInfo.getUsername(), jobACL);
+ }
+}
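As a usage sketch only — the job id and history-file path below are hypothetical, and the real caller in this patch is the history server's job-loading machinery:

    Configuration conf = new Configuration();
    JobId jobId = TypeConverter.toYarn(
        org.apache.hadoop.mapreduce.JobID.forName("job_200912121211_0001"));
    Path historyFile = new Path("/jobhistory/done/job_200912121211_0001"); // hypothetical location
    org.apache.hadoop.mapreduce.v2.app.job.Job job =
        new CompletedJob(conf, jobId, historyFile, true); // loadTasks=true parses task data eagerly
    System.out.println(job.getName() + " completed with state " + job.getState());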
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
new file mode 100644
index 0000000..b02f6c5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
@@ -0,0 +1,143 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+public class CompletedTask implements Task {
+
+
+ private final TaskType type;
+ private Counters counters;
+ private final long startTime;
+ private final long finishTime;
+ private TaskState state;
+ private final TaskId taskId;
+ private final TaskReport report;
+ private final Map<TaskAttemptId, TaskAttempt> attempts =
+ new LinkedHashMap<TaskAttemptId, TaskAttempt>();
+
+ private static final Log LOG = LogFactory.getLog(CompletedTask.class);
+
+ CompletedTask(TaskId taskId, TaskInfo taskInfo) {
+ //TODO JobHistoryParser.handleTaskFailedAttempt should use state from the event.
+ LOG.debug("HandlingTaskId: [" + taskId + "]");
+ this.taskId = taskId;
+ this.startTime = taskInfo.getStartTime();
+ this.finishTime = taskInfo.getFinishTime();
+ this.type = TypeConverter.toYarn(taskInfo.getTaskType());
+ if (taskInfo.getCounters() != null)
+ this.counters = TypeConverter.toYarn(taskInfo.getCounters());
+ if (taskInfo.getTaskStatus() != null) {
+ this.state = TaskState.valueOf(taskInfo.getTaskStatus());
+ } else {
+ this.state = TaskState.KILLED;
+ }
+ report = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskReport.class);
+ for (TaskAttemptInfo attemptHistory : taskInfo.getAllTaskAttempts()
+ .values()) {
+ CompletedTaskAttempt attempt = new CompletedTaskAttempt(taskId,
+ attemptHistory);
+ report.addAllDiagnostics(attempt.getDiagnostics()); //TODO TMI?
+ attempts.put(attempt.getID(), attempt);
+ if (attemptHistory.getTaskStatus() != null
+ && attemptHistory.getTaskStatus().equals(
+ TaskState.SUCCEEDED.toString())
+ && report.getSuccessfulAttempt() == null) {
+ report.setSuccessfulAttempt(TypeConverter.toYarn(attemptHistory
+ .getAttemptId()));
+ }
+ }
+ report.setTaskId(taskId);
+ report.setStartTime(startTime);
+ report.setFinishTime(finishTime);
+ report.setTaskState(state);
+ report.setProgress(getProgress());
+ report.setCounters(getCounters());
+ report.addAllRunningAttempts(new ArrayList<TaskAttemptId>(attempts.keySet()));
+ }
+
+ @Override
+ public boolean canCommit(TaskAttemptId taskAttemptID) {
+ return false;
+ }
+
+ @Override
+ public TaskAttempt getAttempt(TaskAttemptId attemptID) {
+ return attempts.get(attemptID);
+ }
+
+ @Override
+ public Map<TaskAttemptId, TaskAttempt> getAttempts() {
+ return attempts;
+ }
+
+ @Override
+ public Counters getCounters() {
+ return counters;
+ }
+
+ @Override
+ public TaskId getID() {
+ return taskId;
+ }
+
+ @Override
+ public float getProgress() {
+ return 1.0f;
+ }
+
+ @Override
+ public TaskReport getReport() {
+ return report;
+ }
+
+ @Override
+ public TaskType getType() {
+ return type;
+ }
+
+ @Override
+ public boolean isFinished() {
+ return true;
+ }
+
+ @Override
+ public TaskState getState() {
+ return state;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
new file mode 100644
index 0000000..3759be5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
@@ -0,0 +1,149 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+public class CompletedTaskAttempt implements TaskAttempt {
+
+ private final TaskAttemptInfo attemptInfo;
+ private final TaskAttemptId attemptId;
+ private Counters counters;
+ private final TaskAttemptState state;
+ private final TaskAttemptReport report;
+ private final List<String> diagnostics = new ArrayList<String>();
+
+ private String localDiagMessage;
+
+ CompletedTaskAttempt(TaskId taskId, TaskAttemptInfo attemptInfo) {
+ this.attemptInfo = attemptInfo;
+ this.attemptId = TypeConverter.toYarn(attemptInfo.getAttemptId());
+ if (attemptInfo.getCounters() != null)
+ this.counters = TypeConverter.toYarn(attemptInfo.getCounters());
+ if (attemptInfo.getTaskStatus() != null) {
+ this.state = TaskAttemptState.valueOf(attemptInfo.getTaskStatus());
+ } else {
+ this.state = TaskAttemptState.KILLED;
+ localDiagMessage = "Attmpt state missing from History : marked as KILLED";
+ diagnostics.add(localDiagMessage);
+ }
+
+ if (attemptInfo.getError() != null) {
+ diagnostics.add(attemptInfo.getError());
+ }
+
+ report = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptReport.class);
+ report.setCounters(counters);
+
+ report.setTaskAttemptId(attemptId);
+ report.setTaskAttemptState(state);
+ report.setProgress(getProgress());
+ report.setStartTime(attemptInfo.getStartTime());
+
+ report.setFinishTime(attemptInfo.getFinishTime());
+ if (localDiagMessage != null) {
+ report.setDiagnosticInfo(attemptInfo.getError() + ", " + localDiagMessage);
+ } else {
+ report.setDiagnosticInfo(attemptInfo.getError());
+ }
+// report.setPhase(attemptInfo.get); //TODO
+ report.setStateString(attemptInfo.getState());
+ report.setCounters(getCounters());
+ }
+
+ @Override
+ public ContainerId getAssignedContainerID() {
+ //TODO ContainerId needs to be part of some historyEvent to be able to render the log directory.
+ ContainerId containerId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ContainerId.class);
+ containerId.setId(-1);
+ containerId.setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class));
+ containerId.getAppId().setId(-1);
+ containerId.getAppId().setClusterTimestamp(-1);
+ return containerId;
+ }
+
+ @Override
+ public String getAssignedContainerMgrAddress() {
+ // TODO Verify this is correct.
+ return attemptInfo.getTrackerName();
+ }
+
+ @Override
+ public String getNodeHttpAddress() {
+ return attemptInfo.getHostname() + ":" + attemptInfo.getHttpPort();
+ }
+
+ @Override
+ public Counters getCounters() {
+ return counters;
+ }
+
+ @Override
+ public TaskAttemptId getID() {
+ return attemptId;
+ }
+
+ @Override
+ public float getProgress() {
+ return 1.0f;
+ }
+
+ @Override
+ public TaskAttemptReport getReport() {
+ return report;
+ }
+
+ @Override
+ public TaskAttemptState getState() {
+ return state;
+ }
+
+ @Override
+ public boolean isFinished() {
+ return true;
+ }
+
+ @Override
+ public List<String> getDiagnostics() {
+ return diagnostics;
+ }
+
+ @Override
+ public long getLaunchTime() {
+ return report.getStartTime();
+ }
+
+ @Override
+ public long getFinishTime() {
+ return report.getFinishTime();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
new file mode 100644
index 0000000..a848edc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
@@ -0,0 +1,294 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.security.AccessControlException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.hs.webapp.HSWebApp;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig;
+import org.apache.hadoop.mapreduce.v2.security.client.ClientHSSecurityInfo;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.WebApps;
+
+/**
+ * This module is responsible for talking to the
+ * JobClient (user facing).
+ *
+ */
+public class HistoryClientService extends AbstractService {
+
+ private static final Log LOG = LogFactory.getLog(HistoryClientService.class);
+
+ private MRClientProtocol protocolHandler;
+ private Server server;
+ private WebApp webApp;
+ private InetSocketAddress bindAddress;
+ private HistoryContext history;
+
+ public HistoryClientService(HistoryContext history) {
+ super("HistoryClientService");
+ this.history = history;
+ this.protocolHandler = new MRClientProtocolHandler();
+ }
+
+ public void start() {
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ Configuration conf = new Configuration(getConfig());
+ conf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ ClientHSSecurityInfo.class, SecurityInfo.class);
+ initializeWebApp(getConfig());
+ String serviceAddr = conf.get(JHConfig.HS_BIND_ADDRESS,
+ JHConfig.DEFAULT_HS_BIND_ADDRESS);
+ InetSocketAddress address = NetUtils.createSocketAddr(serviceAddr);
+ InetAddress hostNameResolved = null;
+ try {
+ hostNameResolved = InetAddress.getLocalHost(); //address.getAddress().getLocalHost();
+ } catch (UnknownHostException e) {
+ throw new YarnException(e);
+ }
+
+ server =
+ rpc.getServer(MRClientProtocol.class, protocolHandler, address,
+ conf, null,
+ conf.getInt(JHConfig.HS_CLIENT_THREADS,
+ JHConfig.DEFAULT_HS_CLIENT_THREADS));
+ server.start();
+ this.bindAddress =
+ NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
+ + ":" + server.getPort());
+ LOG.info("Instantiated MRClientService at " + this.bindAddress);
+
+ super.start();
+ }
+
+ private void initializeWebApp(Configuration conf) {
+ webApp = new HSWebApp(history);
+ String bindAddress = conf.get(JHConfig.HS_WEBAPP_BIND_ADDRESS,
+ JHConfig.DEFAULT_HS_WEBAPP_BIND_ADDRESS);
+ WebApps.$for("yarn", this).at(bindAddress).start(webApp);
+ }
+
+ @Override
+ public void stop() {
+ if (server != null) {
+ server.close();
+ }
+ if (webApp != null) {
+ webApp.stop();
+ }
+ super.stop();
+ }
+
+ private class MRClientProtocolHandler implements MRClientProtocol {
+
+ private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ private Job verifyAndGetJob(final JobId jobID) throws YarnRemoteException {
+ UserGroupInformation loginUgi = null;
+ Job job = null;
+ try {
+ loginUgi = UserGroupInformation.getLoginUser();
+ job = loginUgi.doAs(new PrivilegedExceptionAction<Job>() {
+
+ @Override
+ public Job run() throws Exception {
+ Job job = history.getJob(jobID);
+ return job;
+ }
+ });
+ } catch (IOException e) {
+ throw RPCUtil.getRemoteException(e);
+ } catch (InterruptedException e) {
+ throw RPCUtil.getRemoteException(e);
+ }
+ if (job == null) {
+ throw RPCUtil.getRemoteException("Unknown job " + jobID);
+ }
+ JobACL operation = JobACL.VIEW_JOB;
+ //TODO disable check access for now.
+ checkAccess(job, operation);
+ return job;
+ }
+
+ @Override
+ public GetCountersResponse getCounters(GetCountersRequest request) throws YarnRemoteException {
+ JobId jobId = request.getJobId();
+ Job job = verifyAndGetJob(jobId);
+ GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class);
+ response.setCounters(job.getCounters());
+ return response;
+ }
+
+ @Override
+ public GetJobReportResponse getJobReport(GetJobReportRequest request) throws YarnRemoteException {
+ JobId jobId = request.getJobId();
+ Job job = verifyAndGetJob(jobId);
+ GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class);
+ response.setJobReport(job.getReport());
+ return response;
+ }
+
+ @Override
+ public GetTaskAttemptReportResponse getTaskAttemptReport(GetTaskAttemptReportRequest request) throws YarnRemoteException {
+ TaskAttemptId taskAttemptId = request.getTaskAttemptId();
+ Job job = verifyAndGetJob(taskAttemptId.getTaskId().getJobId());
+ GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class);
+ response.setTaskAttemptReport(job.getTask(taskAttemptId.getTaskId()).getAttempt(taskAttemptId).getReport());
+ return response;
+ }
+
+ @Override
+ public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) throws YarnRemoteException {
+ TaskId taskId = request.getTaskId();
+ Job job = verifyAndGetJob(taskId.getJobId());
+ GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class);
+ response.setTaskReport(job.getTask(taskId).getReport());
+ return response;
+ }
+
+ @Override
+ public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(GetTaskAttemptCompletionEventsRequest request) throws YarnRemoteException {
+ JobId jobId = request.getJobId();
+ int fromEventId = request.getFromEventId();
+ int maxEvents = request.getMaxEvents();
+
+ Job job = verifyAndGetJob(jobId);
+ GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class);
+ response.addAllCompletionEvents(Arrays.asList(job.getTaskAttemptCompletionEvents(fromEventId, maxEvents)));
+ return response;
+ }
+
+ @Override
+ public KillJobResponse killJob(KillJobRequest request) throws YarnRemoteException {
+ throw RPCUtil.getRemoteException("Invalid operation on completed job");
+ }
+
+ @Override
+ public KillTaskResponse killTask(KillTaskRequest request) throws YarnRemoteException {
+ throw RPCUtil.getRemoteException("Invalid operation on completed job");
+ }
+
+ @Override
+ public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) throws YarnRemoteException {
+ throw RPCUtil.getRemoteException("Invalid operation on completed job");
+ }
+
+ @Override
+ public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request) throws YarnRemoteException {
+ TaskAttemptId taskAttemptId = request.getTaskAttemptId();
+
+ Job job = verifyAndGetJob(taskAttemptId.getTaskId().getJobId());
+
+ GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class);
+ response.addAllDiagnostics(job.getTask(taskAttemptId.getTaskId()).getAttempt(taskAttemptId).getDiagnostics());
+ return response;
+ }
+
+ @Override
+ public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) throws YarnRemoteException {
+ throw RPCUtil.getRemoteException("Invalid operation on completed job");
+ }
+
+ @Override
+ public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request) throws YarnRemoteException {
+ JobId jobId = request.getJobId();
+ TaskType taskType = request.getTaskType();
+
+ GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
+ Job job = verifyAndGetJob(jobId);
+ Collection<Task> tasks = job.getTasks(taskType).values();
+ for (Task task : tasks) {
+ response.addTaskReport(task.getReport());
+ }
+ return response;
+ }
+
+ private void checkAccess(Job job, JobACL jobOperation)
+ throws YarnRemoteException {
+ if (!UserGroupInformation.isSecurityEnabled()) {
+ return;
+ }
+ UserGroupInformation callerUGI;
+ try {
+ callerUGI = UserGroupInformation.getCurrentUser();
+ } catch (IOException e) {
+ throw RPCUtil.getRemoteException(e);
+ }
+ if (!job.checkAccess(callerUGI, jobOperation)) {
+ throw RPCUtil.getRemoteException(new AccessControlException("User "
+ + callerUGI.getShortUserName() + " cannot perform operation "
+ + jobOperation.name() + " on " + job.getID()));
+ }
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryContext.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryContext.java
new file mode 100644
index 0000000..0dfebf8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryContext.java
@@ -0,0 +1,31 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+public interface HistoryContext extends AppContext {
+
+ Map<JobId, Job> getAllJobs(ApplicationId appID);
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
new file mode 100644
index 0000000..59635f2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
@@ -0,0 +1,1158 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobSummary;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+
+/*
+ * Loads and manages the Job history cache.
+ */
+public class JobHistory extends AbstractService implements HistoryContext {
+
+ private static final int DEFAULT_JOBLIST_CACHE_SIZE = 20000;
+ private static final int DEFAULT_LOADEDJOB_CACHE_SIZE = 2000;
+ private static final int DEFAULT_DATESTRING_CACHE_SIZE = 200000;
+  private static final long DEFAULT_MOVE_THREAD_INTERVAL = 3 * 60 * 1000L; //3 minutes
+ private static final int DEFAULT_MOVE_THREAD_COUNT = 3;
+
+ static final long DEFAULT_HISTORY_MAX_AGE = 7 * 24 * 60 * 60 * 1000L; //1 week
+  static final long DEFAULT_RUN_INTERVAL = 1 * 24 * 60 * 60 * 1000L; //1 day
+
+ private static final Log LOG = LogFactory.getLog(JobHistory.class);
+
+ private static final Log SUMMARY_LOG = LogFactory.getLog(JobSummary.class);
+
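+  // Matches search dates of the form M/D/YYYY (one- or two-digit month and
+  // day), with the year restricted to 2000-2999.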
+ private static final Pattern DATE_PATTERN = Pattern
+ .compile("([0-1]?[0-9])/([0-3]?[0-9])/((?:2[0-9])[0-9][0-9])");
+
+  /*
+   * TODO Get rid of this once JobId has its own comparator
+   */
+ private static final Comparator<JobId> JOB_ID_COMPARATOR = new Comparator<JobId>() {
+ @Override
+ public int compare(JobId o1, JobId o2) {
+ if (o1.getAppId().getClusterTimestamp() > o2.getAppId().getClusterTimestamp()) {
+ return 1;
+ } else if (o1.getAppId().getClusterTimestamp() < o2.getAppId().getClusterTimestamp()) {
+ return -1;
+ } else {
+ return o1.getId() - o2.getId();
+ }
+ }
+ };
+
+  private static final String DONE_BEFORE_SERIAL_TAIL = JobHistoryUtils.doneSubdirsBeforeSerialTail();
+
+  /**
+   * Maps between a serial number (generated based on jobId) and the timestamp
+   * component(s) to which it belongs. Facilitates jobId based searches.
+   * If a jobId is not present in this index, its history files cannot be
+   * located.
+   */
+ private final SortedMap<String, Set<String>> idToDateString = new ConcurrentSkipListMap<String, Set<String>>();
+
+  //Maintains minimal details for recent jobs (parsed from history file name).
+  //Sorted by JobId via JOB_ID_COMPARATOR (cluster timestamp, then job id).
+ private final SortedMap<JobId, MetaInfo> jobListCache = new ConcurrentSkipListMap<JobId, MetaInfo>(
+ JOB_ID_COMPARATOR);
+
+
+  // Re-use existing MetaInfo objects if they exist for the specific JobId. (synchronization on MetaInfo)
+  // Check for existence of the object when using iterators.
+ private final SortedMap<JobId, MetaInfo> intermediateListCache = new ConcurrentSkipListMap<JobId, JobHistory.MetaInfo>(
+ JOB_ID_COMPARATOR);
+
+  //Maintains the set of known done subdirectories.
+ private final Set<Path> existingDoneSubdirs = new HashSet<Path>();
+
+ private final SortedMap<JobId, Job> loadedJobCache = new ConcurrentSkipListMap<JobId, Job>(
+ JOB_ID_COMPARATOR);
+
+ /**
+ * Maintains a mapping between intermediate user directories and the last known modification time.
+ */
+ private Map<String, Long> userDirModificationTimeMap = new HashMap<String, Long>();
+
+ //The number of jobs to maintain in the job list cache.
+ private int jobListCacheSize;
+
+  //The number of jobs to retain in the loaded job cache.
+ private int loadedJobCacheSize;
+
+ //The number of entries in idToDateString
+ private int dateStringCacheSize;
+
+ //Time interval for the move thread.
+ private long moveThreadInterval;
+
+ //Number of move threads.
+ private int numMoveThreads;
+
+ private Configuration conf;
+
+ private boolean debugMode;
+ private int serialNumberLowDigits;
+ private String serialNumberFormat;
+
+
+ private Path doneDirPrefixPath = null; // folder for completed jobs
+ private FileContext doneDirFc; // done Dir FileContext
+
+ private Path intermediateDoneDirPath = null; //Intermediate Done Dir Path
+ private FileContext intermediateDoneDirFc; //Intermediate Done Dir FileContext
+
+ private Thread moveIntermediateToDoneThread = null;
+ private MoveIntermediateToDoneRunnable moveIntermediateToDoneRunnable = null;
+ private ScheduledThreadPoolExecutor cleanerScheduledExecutor = null;
+
+  /*
+   * History files are written out to the path
+   * ${DONE_DIR}/VERSION_STRING/YYYY/MM/DD/HH/SERIAL_NUM/jh{index_entries}.jhist
+   */
+
+ @Override
+ public void init(Configuration conf) throws YarnException {
+ LOG.info("JobHistory Init");
+ this.conf = conf;
+ this.appID = RecordFactoryProvider.getRecordFactory(conf)
+ .newRecordInstance(ApplicationId.class);
+ this.appAttemptID = RecordFactoryProvider.getRecordFactory(conf)
+ .newRecordInstance(ApplicationAttemptId.class);
+
+ debugMode = conf.getBoolean(JHConfig.HISTORY_DEBUG_MODE_KEY, false);
+ serialNumberLowDigits = debugMode ? 1 : 3;
+ serialNumberFormat = ("%0"
+ + (JobHistoryUtils.SERIAL_NUMBER_DIRECTORY_DIGITS + serialNumberLowDigits) + "d");
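+    // Zero-padded decimal format for serial number directories. For
+    // illustration only: if SERIAL_NUMBER_DIRECTORY_DIGITS were 6, this would
+    // yield "%09d" in normal mode and "%07d" in debug mode (the actual value
+    // is defined in JobHistoryUtils).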
+
+    String doneDirPrefix = JobHistoryUtils
+        .getConfiguredHistoryServerDoneDirPrefix(conf);
+ try {
+ doneDirPrefixPath = FileContext.getFileContext(conf).makeQualified(
+ new Path(doneDirPrefix));
+ doneDirFc = FileContext.getFileContext(doneDirPrefixPath.toUri(), conf);
+ mkdir(doneDirFc, doneDirPrefixPath, new FsPermission(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION));
+ } catch (IOException e) {
+ throw new YarnException("Error creating done directory: [" + doneDirPrefixPath + "]", e);
+ }
+
+    String intermediateDoneDirPrefix = JobHistoryUtils
+        .getConfiguredHistoryIntermediateDoneDirPrefix(conf);
+ try {
+ intermediateDoneDirPath = FileContext.getFileContext(conf)
+ .makeQualified(new Path(intermediateDoneDirPrefix));
+ intermediateDoneDirFc = FileContext.getFileContext(
+ intermediateDoneDirPath.toUri(), conf);
+ mkdir(intermediateDoneDirFc, intermediateDoneDirPath, new FsPermission(JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS.toShort()));
+ } catch (IOException e) {
+ LOG.info("error creating done directory on dfs " + e);
+ throw new YarnException("Error creating intermediate done directory: [" + intermediateDoneDirPath + "]", e);
+ }
+
+ jobListCacheSize = conf.getInt(JHConfig.HISTORY_SERVER_JOBLIST_CACHE_SIZE_KEY, DEFAULT_JOBLIST_CACHE_SIZE);
+ loadedJobCacheSize = conf.getInt(JHConfig.HISTORY_SERVER_LOADED_JOB_CACHE_SIZE_KEY, DEFAULT_LOADEDJOB_CACHE_SIZE);
+ dateStringCacheSize = conf.getInt(JHConfig.HISTORY_SERVER_DATESTRING_CACHE_SIZE_KEY, DEFAULT_DATESTRING_CACHE_SIZE);
+ moveThreadInterval =
+ conf.getLong(JHConfig.HISTORY_SERVER_MOVE_THREAD_INTERVAL,
+ DEFAULT_MOVE_THREAD_INTERVAL);
+ numMoveThreads = conf.getInt(JHConfig.HISTORY_SERVER_NUM_MOVE_THREADS, DEFAULT_MOVE_THREAD_COUNT);
+ try {
+ initExisting();
+ } catch (IOException e) {
+ throw new YarnException("Failed to intialize existing directories", e);
+ }
+ super.init(conf);
+ }
+
+ private void mkdir(FileContext fc, Path path, FsPermission fsp)
+ throws IOException {
+ if (!fc.util().exists(path)) {
+ try {
+ fc.mkdir(path, fsp, true);
+
+ FileStatus fsStatus = fc.getFileStatus(path);
+ LOG.info("Perms after creating " + fsStatus.getPermission().toShort()
+ + ", Expected: " + fsp.toShort());
+ if (fsStatus.getPermission().toShort() != fsp.toShort()) {
+ LOG.info("Explicitly setting permissions to : " + fsp.toShort()
+ + ", " + fsp);
+ fc.setPermission(path, fsp);
+ }
+ } catch (FileAlreadyExistsException e) {
+ LOG.info("Directory: [" + path + "] already exists.");
+ }
+ }
+ }
+
+ @Override
+ public void start() {
+    //Start moveIntermediateToDoneThread
+ moveIntermediateToDoneRunnable = new MoveIntermediateToDoneRunnable(moveThreadInterval, numMoveThreads);
+ moveIntermediateToDoneThread = new Thread(moveIntermediateToDoneRunnable);
+ moveIntermediateToDoneThread.setName("MoveIntermediateToDoneScanner");
+ moveIntermediateToDoneThread.start();
+
+ //Start historyCleaner
+ boolean startCleanerService = conf.getBoolean(JHConfig.RUN_HISTORY_CLEANER_KEY, true);
+ if (startCleanerService) {
+ long maxAgeOfHistoryFiles = conf.getLong(JHConfig.HISTORY_MAXAGE,
+ DEFAULT_HISTORY_MAX_AGE);
+ cleanerScheduledExecutor = new ScheduledThreadPoolExecutor(1);
+ long runInterval = conf.getLong(JHConfig.HISTORY_CLEANER_RUN_INTERVAL,
+ DEFAULT_RUN_INTERVAL);
+ cleanerScheduledExecutor
+ .scheduleAtFixedRate(new HistoryCleaner(maxAgeOfHistoryFiles),
+            30 * 1000L, runInterval, TimeUnit.MILLISECONDS);
+ }
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ LOG.info("Stopping JobHistory");
+ if (moveIntermediateToDoneThread != null) {
+ LOG.info("Stopping move thread");
+ moveIntermediateToDoneRunnable.stop();
+ moveIntermediateToDoneThread.interrupt();
+ try {
+ LOG.info("Joining on move thread");
+ moveIntermediateToDoneThread.join();
+ } catch (InterruptedException e) {
+ LOG.info("Interrupted while stopping move thread");
+ }
+ }
+
+ if (cleanerScheduledExecutor != null) {
+ LOG.info("Stopping History Cleaner");
+ cleanerScheduledExecutor.shutdown();
+ boolean interrupted = false;
+ long currentTime = System.currentTimeMillis();
+      while (!cleanerScheduledExecutor.isTerminated()
+          && System.currentTimeMillis() < currentTime + 1000L && !interrupted) {
+ try {
+ Thread.sleep(20);
+ } catch (InterruptedException e) {
+ interrupted = true;
+ }
+ }
+      if (!cleanerScheduledExecutor.isTerminated()) {
+ LOG.warn("HistoryCleanerService shutdown may not have succeeded");
+ }
+ }
+ super.stop();
+ }
+
+ public JobHistory() {
+ super(JobHistory.class.getName());
+ }
+
+ /**
+ * Populates index data structures.
+ * Should only be called at initialization times.
+ */
+ @SuppressWarnings("unchecked")
+ private void initExisting() throws IOException {
+ List<FileStatus> timestampedDirList = findTimestampedDirectories();
+ Collections.sort(timestampedDirList);
+ for (FileStatus fs : timestampedDirList) {
+ //TODO Could verify the correct format for these directories.
+ addDirectoryToSerialNumberIndex(fs.getPath());
+ addDirectoryToJobListCache(fs.getPath());
+ }
+ }
+
+ private void removeDirectoryFromSerialNumberIndex(Path serialDirPath) {
+ String serialPart = serialDirPath.getName();
+ String timeStampPart = JobHistoryUtils.getTimestampPartFromPath(serialDirPath.toString());
+ if (timeStampPart == null) {
+ LOG.warn("Could not find timestamp portion from path: " + serialDirPath.toString() +". Continuing with next");
+ return;
+ }
+ if (serialPart == null) {
+ LOG.warn("Could not find serial portion from path: " + serialDirPath.toString() + ". Continuing with next");
+ return;
+ }
+ if (idToDateString.containsKey(serialPart)) {
+ Set<String> set = idToDateString.get(serialPart);
+ set.remove(timeStampPart);
+ if (set.isEmpty()) {
+ idToDateString.remove(serialPart);
+ }
+ }
+
+ }
+
+ private void addDirectoryToSerialNumberIndex(Path serialDirPath) {
+ String serialPart = serialDirPath.getName();
+ String timestampPart = JobHistoryUtils.getTimestampPartFromPath(serialDirPath.toString());
+ if (timestampPart == null) {
+ LOG.warn("Could not find timestamp portion from path: " + serialDirPath.toString() +". Continuing with next");
+ return;
+ }
+    if (serialPart == null) {
+      LOG.warn("Could not find serial portion from path: " + serialDirPath.toString() + ". Continuing with next");
+      return;
+    }
+ addToSerialNumberIndex(serialPart, timestampPart);
+ }
+
+  private void addToSerialNumberIndex(String serialPart, String timestampPart) {
+    Set<String> datePartSet = idToDateString.get(serialPart);
+    if (datePartSet == null) {
+      datePartSet = new HashSet<String>();
+      idToDateString.put(serialPart, datePartSet);
+      if (idToDateString.size() > dateStringCacheSize) {
+        idToDateString.remove(idToDateString.firstKey());
+      }
+    }
+    // Record the timestamp even when the serial number is already indexed --
+    // a serial number can span multiple timestamp directories.
+    datePartSet.add(timestampPart);
+  }
+
+ private void addDirectoryToJobListCache(Path path) throws IOException {
+ List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(path,
+ doneDirFc);
+ for (FileStatus fs : historyFileList) {
+ JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
+ .getName());
+ String confFileName = JobHistoryUtils
+ .getIntermediateConfFileName(jobIndexInfo.getJobId());
+ String summaryFileName = JobHistoryUtils
+ .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
+ MetaInfo metaInfo = new MetaInfo(fs.getPath(), new Path(fs.getPath()
+ .getParent(), confFileName), new Path(fs.getPath().getParent(),
+ summaryFileName), jobIndexInfo);
+ addToJobListCache(jobIndexInfo.getJobId(), metaInfo);
+ }
+ }
+
+ private static List<FileStatus> scanDirectory(Path path, FileContext fc, PathFilter pathFilter) throws IOException {
+ path = fc.makeQualified(path);
+ List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
+ RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
+ while (fileStatusIter.hasNext()) {
+ FileStatus fileStatus = fileStatusIter.next();
+ Path filePath = fileStatus.getPath();
+ if (fileStatus.isFile() && pathFilter.accept(filePath)) {
+ jhStatusList.add(fileStatus);
+ }
+ }
+ return jhStatusList;
+ }
+
+ private static List<FileStatus> scanDirectoryForHistoryFiles(Path path, FileContext fc) throws IOException {
+ return scanDirectory(path, fc, JobHistoryUtils.getHistoryFileFilter());
+ }
+
+  /**
+   * Finds all history directories with a timestamp component by scanning
+   * the filesystem. Used when the JobHistory server is started.
+   * @return a list of FileStatus objects for the timestamped directories.
+   */
+ private List<FileStatus> findTimestampedDirectories() throws IOException {
+ List<FileStatus> fsList = JobHistoryUtils.localGlobber(doneDirFc, doneDirPrefixPath, DONE_BEFORE_SERIAL_TAIL);
+ return fsList;
+ }
+
+ /**
+ * Adds an entry to the job list cache. Maintains the size.
+ */
+ private void addToJobListCache(JobId jobId, MetaInfo metaInfo) {
+ jobListCache.put(jobId, metaInfo);
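+    // Evict the smallest JobId (per JOB_ID_COMPARATOR, the entry with the
+    // oldest cluster timestamp) once the cache exceeds its configured size.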
+ if (jobListCache.size() > jobListCacheSize) {
+ jobListCache.remove(jobListCache.firstKey());
+ }
+ }
+
+ /**
+ * Adds an entry to the loaded job cache. Maintains the size.
+ */
+ private void addToLoadedJobCache(Job job) {
+ loadedJobCache.put(job.getID(), job);
+ if (loadedJobCache.size() > loadedJobCacheSize ) {
+ loadedJobCache.remove(loadedJobCache.firstKey());
+ }
+ }
+
+
+ /**
+ * Scans the intermediate directory to find user directories. Scans these
+ * for history files if the modification time for the directory has changed.
+ * @throws IOException
+ */
+ private void scanIntermediateDirectory() throws IOException {
+ List<FileStatus> userDirList = JobHistoryUtils.localGlobber(intermediateDoneDirFc, intermediateDoneDirPath, "");
+
+ for (FileStatus userDir : userDirList) {
+ String name = userDir.getPath().getName();
+ long newModificationTime = userDir.getModificationTime();
+ boolean shouldScan = false;
+ synchronized (userDirModificationTimeMap) {
+ if (!userDirModificationTimeMap.containsKey(name) || newModificationTime > userDirModificationTimeMap.get(name)) {
+ shouldScan = true;
+ userDirModificationTimeMap.put(name, newModificationTime);
+ }
+ }
+ if (shouldScan) {
+ scanIntermediateDirectory(userDir.getPath());
+ }
+ }
+ }
+
+ /**
+ * Scans the specified path and populates the intermediate cache.
+ * @param absPath
+ * @throws IOException
+ */
+ private void scanIntermediateDirectory(final Path absPath)
+ throws IOException {
+ List<FileStatus> fileStatusList = scanDirectoryForHistoryFiles(absPath,
+ intermediateDoneDirFc);
+ for (FileStatus fs : fileStatusList) {
+ JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
+ .getName());
+ String confFileName = JobHistoryUtils
+ .getIntermediateConfFileName(jobIndexInfo.getJobId());
+ String summaryFileName = JobHistoryUtils
+ .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
+ MetaInfo metaInfo = new MetaInfo(fs.getPath(), new Path(fs.getPath()
+ .getParent(), confFileName), new Path(fs.getPath().getParent(),
+ summaryFileName), jobIndexInfo);
+ if (!intermediateListCache.containsKey(jobIndexInfo.getJobId())) {
+ intermediateListCache.put(jobIndexInfo.getJobId(), metaInfo);
+ }
+ }
+ }
+
+  /**
+   * Searches the job history file FileStatus list for the specified JobId.
+   *
+   * @param fileStatusList fileStatus list of Job History Files.
+   * @param jobId The JobId to find.
+   * @return A MetaInfo object for the jobId, null if not found.
+   * @throws IOException
+   */
+ private MetaInfo getJobMetaInfo(List<FileStatus> fileStatusList, JobId jobId) throws IOException {
+ for (FileStatus fs : fileStatusList) {
+ JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath().getName());
+ if (jobIndexInfo.getJobId().equals(jobId)) {
+ String confFileName = JobHistoryUtils
+ .getIntermediateConfFileName(jobIndexInfo.getJobId());
+ String summaryFileName = JobHistoryUtils
+ .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
+ MetaInfo metaInfo = new MetaInfo(fs.getPath(), new Path(fs.getPath()
+ .getParent(), confFileName), new Path(fs.getPath().getParent(),
+ summaryFileName), jobIndexInfo);
+ return metaInfo;
+ }
+ }
+ return null;
+ }
+
+  /**
+   * Scans old directories known by the idToDateString map for the specified
+   * jobId.
+   * If the number of directories is higher than the supported size of the
+   * idToDateString cache, the jobId will not be found.
+   * @param jobId the jobId.
+   * @return the MetaInfo for the job, or null if it could not be located.
+   * @throws IOException
+   */
+ private MetaInfo scanOldDirsForJob(JobId jobId) throws IOException {
+ int jobSerialNumber = JobHistoryUtils.jobSerialNumber(jobId);
+ String boxedSerialNumber = String.valueOf(jobSerialNumber);
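+    // The serial number derived from the jobId keys the idToDateString index,
+    // yielding the candidate timestamp directories to scan for this job.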
+ Set<String> dateStringSet = idToDateString.get(boxedSerialNumber);
+ if (dateStringSet == null) {
+ return null;
+ }
+ for (String timestampPart : dateStringSet) {
+ Path logDir = canonicalHistoryLogPath(jobId, timestampPart);
+ List<FileStatus> fileStatusList = scanDirectoryForHistoryFiles(logDir, doneDirFc);
+ MetaInfo metaInfo = getJobMetaInfo(fileStatusList, jobId);
+ if (metaInfo != null) {
+ return metaInfo;
+ }
+ }
+ return null;
+ }
+
+  /**
+   * Checks for the existence of the job history file in the intermediate directory.
+   * @param jobId the jobId to look up.
+   * @return the MetaInfo from the intermediate cache, or null if absent.
+   * @throws IOException
+   */
+ private MetaInfo scanIntermediateForJob(JobId jobId) throws IOException {
+ scanIntermediateDirectory();
+ return intermediateListCache.get(jobId);
+ }
+
+ @Override
+ public String getApplicationName() {
+ return "Job History Server";
+ }
+
+ private class MoveIntermediateToDoneRunnable implements Runnable {
+
+    private long sleepTime;
+    private ThreadPoolExecutor moveToDoneExecutor = null;
+    // volatile: stop() is called from the service shutdown thread while run()
+    // polls this flag from the scanner thread.
+    private volatile boolean running = false;
+
+ public void stop() {
+ running = false;
+ }
+
+ MoveIntermediateToDoneRunnable(long sleepTime, int numMoveThreads) {
+ this.sleepTime = sleepTime;
+ moveToDoneExecutor = new ThreadPoolExecutor(1, numMoveThreads, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
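+      // Note: with an unbounded LinkedBlockingQueue the pool never grows past
+      // its core size of one thread; numMoveThreads would only take effect
+      // with a bounded queue.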
+ running = true;
+ }
+
+ @Override
+ public void run() {
+ Thread.currentThread().setName("IntermediateHistoryScanner");
+ try {
+ while (running) {
+ LOG.info("Starting scan to move intermediate done files");
+ scanIntermediateDirectory();
+ for (final MetaInfo metaInfo : intermediateListCache.values()) {
+ moveToDoneExecutor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ moveToDone(metaInfo);
+ } catch (IOException e) {
+ LOG.info("Failed to process metaInfo for job: " + metaInfo.jobIndexInfo.getJobId(), e);
+ }
+ }
+ });
+
+ }
+ synchronized (this) { // TODO Is this really required.
+ try {
+ this.wait(sleepTime);
+ } catch (InterruptedException e) {
+ LOG.info("IntermediateHistoryScannerThread interrupted");
+ }
+ }
+ }
+ } catch (IOException e) {
+ LOG.warn("Unable to get a list of intermediate files to be moved from: "
+ + intermediateDoneDirPath);
+ }
+ }
+ }
+
+ private Job loadJob(MetaInfo metaInfo) {
+ synchronized(metaInfo) {
+ try {
+ Job job = new CompletedJob(conf, metaInfo.getJobIndexInfo().getJobId(), metaInfo.getHistoryFile(), true);
+ addToLoadedJobCache(job);
+ return job;
+ } catch (IOException e) {
+ throw new YarnException("Could not find/load job: " + metaInfo.getJobIndexInfo().getJobId(), e);
+ }
+ }
+ }
+
+ private SortedMap<JobId, JobIndexInfo> getAllJobsMetaInfo() {
+ SortedMap<JobId, JobIndexInfo> result = new TreeMap<JobId, JobIndexInfo>(JOB_ID_COMPARATOR);
+ try {
+ scanIntermediateDirectory();
+ } catch (IOException e) {
+ LOG.warn("Failed to scan intermediate directory", e);
+ throw new YarnException(e);
+ }
+ for (JobId jobId : intermediateListCache.keySet()) {
+ MetaInfo mi = intermediateListCache.get(jobId);
+ if (mi != null) {
+ result.put(jobId, mi.getJobIndexInfo());
+ }
+ }
+ for (JobId jobId : jobListCache.keySet()) {
+ MetaInfo mi = jobListCache.get(jobId);
+ if (mi != null) {
+ result.put(jobId, mi.getJobIndexInfo());
+ }
+ }
+ return result;
+ }
+
+ private Map<JobId, Job> getAllJobsInternal() {
+ //TODO This should ideally be using getAllJobsMetaInfo
+ // or get rid of that method once Job has APIs for user, finishTime etc.
+ SortedMap<JobId, Job> result = new TreeMap<JobId, Job>(JOB_ID_COMPARATOR);
+ try {
+ scanIntermediateDirectory();
+ } catch (IOException e) {
+ LOG.warn("Failed to scan intermediate directory", e);
+ throw new YarnException(e);
+ }
+ for (JobId jobId : intermediateListCache.keySet()) {
+ MetaInfo mi = intermediateListCache.get(jobId);
+ if (mi != null) {
+ result.put(jobId, new PartialJob(mi.getJobIndexInfo(), mi
+ .getJobIndexInfo().getJobId()));
+ }
+ }
+ for (JobId jobId : jobListCache.keySet()) {
+ MetaInfo mi = jobListCache.get(jobId);
+ if (mi != null) {
+ result.put(jobId, new PartialJob(mi.getJobIndexInfo(), mi
+ .getJobIndexInfo().getJobId()));
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Helper method for test cases.
+ */
+ MetaInfo getJobMetaInfo(JobId jobId) throws IOException {
+ //MetaInfo available in cache.
+ MetaInfo metaInfo = null;
+ if (jobListCache.containsKey(jobId)) {
+ metaInfo = jobListCache.get(jobId);
+ }
+
+ if (metaInfo != null) {
+ return metaInfo;
+ }
+
+ //MetaInfo not available. Check intermediate directory for meta info.
+ metaInfo = scanIntermediateForJob(jobId);
+ if (metaInfo != null) {
+ return metaInfo;
+ }
+
+ //Intermediate directory does not contain job. Search through older ones.
+ metaInfo = scanOldDirsForJob(jobId);
+ if (metaInfo != null) {
+ return metaInfo;
+ }
+ return null;
+ }
+
+ private Job findJob(JobId jobId) throws IOException {
+ //Job already loaded.
+ if (loadedJobCache.containsKey(jobId)) {
+ return loadedJobCache.get(jobId);
+ }
+
+ //MetaInfo available in cache.
+ MetaInfo metaInfo = null;
+ if (jobListCache.containsKey(jobId)) {
+ metaInfo = jobListCache.get(jobId);
+ }
+
+ if (metaInfo != null) {
+ return loadJob(metaInfo);
+ }
+
+ //MetaInfo not available. Check intermediate directory for meta info.
+ metaInfo = scanIntermediateForJob(jobId);
+ if (metaInfo != null) {
+ return loadJob(metaInfo);
+ }
+
+ //Intermediate directory does not contain job. Search through older ones.
+ metaInfo = scanOldDirsForJob(jobId);
+ if (metaInfo != null) {
+ return loadJob(metaInfo);
+ }
+ return null;
+ }
+
+ /**
+ * Searches cached jobs for the specified criteria (AND). Ignores the criteria if null.
+ * @param soughtUser
+ * @param soughtJobNameSubstring
+ * @param soughtDateStrings
+ * @return
+ */
+ private Map<JobId, Job> findJobs(String soughtUser, String soughtJobNameSubstring, String[] soughtDateStrings) {
+ boolean searchUser = true;
+ boolean searchJobName = true;
+ boolean searchDates = true;
+ List<Calendar> soughtCalendars = null;
+
+ if (soughtUser == null) {
+ searchUser = false;
+ }
+ if (soughtJobNameSubstring == null) {
+ searchJobName = false;
+ }
+ if (soughtDateStrings == null) {
+ searchDates = false;
+ } else {
+ soughtCalendars = getSoughtDateAsCalendar(soughtDateStrings);
+ }
+
+ Map<JobId, Job> resultMap = new TreeMap<JobId, Job>();
+
+ SortedMap<JobId, JobIndexInfo> allJobs = getAllJobsMetaInfo();
+ for (Map.Entry<JobId, JobIndexInfo> entry : allJobs.entrySet()) {
+ JobId jobId = entry.getKey();
+ JobIndexInfo indexInfo = entry.getValue();
+ String jobName = indexInfo.getJobName();
+ String jobUser = indexInfo.getUser();
+ long finishTime = indexInfo.getFinishTime();
+
+ if (searchUser) {
+ if (!soughtUser.equals(jobUser)) {
+ continue;
+ }
+ }
+
+ if (searchJobName) {
+ if (!jobName.contains(soughtJobNameSubstring)) {
+ continue;
+ }
+ }
+
+ if (searchDates) {
+ boolean matchedDate = false;
+ Calendar jobCal = Calendar.getInstance();
+ jobCal.setTimeInMillis(finishTime);
+ for (Calendar cal : soughtCalendars) {
+ if (jobCal.get(Calendar.YEAR) == cal.get(Calendar.YEAR) &&
+ jobCal.get(Calendar.MONTH) == cal.get(Calendar.MONTH) &&
+ jobCal.get(Calendar.DAY_OF_MONTH) == cal.get(Calendar.DAY_OF_MONTH)) {
+ matchedDate = true;
+ break;
+ }
+ }
+        if (!matchedDate) {
+          continue; //this job does not match the date criteria; check the next one
+        }
+ }
+ resultMap.put(jobId, new PartialJob(indexInfo, jobId));
+ }
+ return resultMap;
+ }
+
+ private List<Calendar> getSoughtDateAsCalendar(String [] soughtDateStrings) {
+ List<Calendar> soughtCalendars = new ArrayList<Calendar>();
+ for (int i = 0 ; i < soughtDateStrings.length ; i++) {
+ String soughtDate = soughtDateStrings[i];
+ if (soughtDate.length() != 0) {
+ Matcher m = DATE_PATTERN.matcher(soughtDate);
+ if (m.matches()) {
+ String yyyyPart = m.group(3);
+ String mmPart = m.group(1);
+ String ddPart = m.group(2);
+
+ if (yyyyPart.length() == 2) {
+ yyyyPart = "20" + yyyyPart;
+ }
+ if (mmPart.length() == 1) {
+ mmPart = "0" + mmPart;
+ }
+ if (ddPart.length() == 1) {
+ ddPart = "0" + ddPart;
+ }
+          Calendar soughtCal = Calendar.getInstance();
+          soughtCal.set(Calendar.YEAR, Integer.parseInt(yyyyPart));
+          //Calendar.MONTH is zero based; DAY_OF_MONTH is one based.
+          soughtCal.set(Calendar.MONTH, Integer.parseInt(mmPart) - 1);
+          soughtCal.set(Calendar.DAY_OF_MONTH, Integer.parseInt(ddPart));
+          soughtCalendars.add(soughtCal);
+ }
+ }
+ }
+ return soughtCalendars;
+ }
+
+ private void moveToDone(MetaInfo metaInfo) throws IOException {
+ long completeTime = metaInfo.getJobIndexInfo().getFinishTime();
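+    // A zero finish time means none was recorded in the index; fall back to
+    // the current time so the file still lands in a valid dated directory.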
+ if (completeTime == 0) completeTime = System.currentTimeMillis();
+ JobId jobId = metaInfo.getJobIndexInfo().getJobId();
+
+ List<Path> paths = new ArrayList<Path>();
+ Path historyFile = metaInfo.getHistoryFile();
+ if (historyFile == null) {
+ LOG.info("No file for job-history with " + jobId + " found in cache!");
+ } else {
+ paths.add(historyFile);
+ }
+
+ Path confFile = metaInfo.getConfFile();
+ if (confFile == null) {
+ LOG.info("No file for jobConf with " + jobId + " found in cache!");
+ } else {
+ paths.add(confFile);
+ }
+
+ //TODO Check all mi getters and setters for the conf path
+ Path summaryFile = metaInfo.getSummaryFile();
+ if (summaryFile == null) {
+ LOG.info("No summary file for job: " + jobId);
+ } else {
+ try {
+ String jobSummaryString = getJobSummary(intermediateDoneDirFc, summaryFile);
+ SUMMARY_LOG.info(jobSummaryString);
+ LOG.info("Deleting JobSummary file: [" + summaryFile + "]");
+ intermediateDoneDirFc.delete(summaryFile, false);
+ metaInfo.setSummaryFile(null);
+ } catch (IOException e) {
+ LOG.warn("Failed to process summary file: [" + summaryFile + "]");
+ throw e;
+ }
+ }
+
+ Path targetDir = canonicalHistoryLogPath(jobId, completeTime);
+ addDirectoryToSerialNumberIndex(targetDir);
+ try {
+ maybeMakeSubdirectory(targetDir);
+ } catch (IOException e) {
+ LOG.warn("Failed creating subdirectory: " + targetDir + " while attempting to move files for jobId: " + jobId);
+ throw e;
+ }
+ synchronized (metaInfo) {
+ if (historyFile != null) {
+ Path toPath = doneDirFc.makeQualified(new Path(targetDir, historyFile.getName()));
+ try {
+ moveToDoneNow(historyFile, toPath);
+ } catch (IOException e) {
+ LOG.warn("Failed to move file: " + historyFile + " for jobId: " + jobId);
+ throw e;
+ }
+ metaInfo.setHistoryFile(toPath);
+ }
+ if (confFile != null) {
+ Path toPath = doneDirFc.makeQualified(new Path(targetDir, confFile.getName()));
+ try {
+ moveToDoneNow(confFile, toPath);
+ } catch (IOException e) {
+ LOG.warn("Failed to move file: " + historyFile + " for jobId: " + jobId);
+ throw e;
+ }
+ metaInfo.setConfFile(toPath);
+ }
+ }
+ addToJobListCache(jobId, metaInfo);
+ intermediateListCache.remove(jobId);
+ }
+
+ private void moveToDoneNow(final Path src, final Path target)
+ throws IOException {
+ LOG.info("Moving " + src.toString() + " to " + target.toString());
+ intermediateDoneDirFc.rename(src, target, Options.Rename.NONE);
+ // fc.util().copy(src, target);
+ //fc.delete(src, false);
+ //intermediateDoneDirFc.setPermission(target, new FsPermission(
+ //JobHistoryUtils.HISTORY_DONE_FILE_PERMISSION));
+ }
+
+  String getJobSummary(FileContext fc, Path path) throws IOException {
+    Path qPath = fc.makeQualified(path);
+    FSDataInputStream in = fc.open(qPath);
+    try {
+      return in.readUTF();
+    } finally {
+      in.close();
+    }
+  }
+
+ private void maybeMakeSubdirectory(Path path) throws IOException {
+ boolean existsInExistingCache = false;
+ synchronized(existingDoneSubdirs) {
+ if (existingDoneSubdirs.contains(path)) existsInExistingCache = true;
+ }
+ try {
+ doneDirFc.getFileStatus(path);
+      if (!existsInExistingCache) {
+        existingDoneSubdirs.add(path);
+        if (debugMode) {
+          LOG.info("JobHistory.maybeMakeSubdirectory -- " + path
+              + " already existed on the filesystem, but was missing from the"
+              + " existing-subdirs cache.");
+        }
+      }
+ } catch (FileNotFoundException fnfE) {
+ try {
+ FsPermission fsp = new FsPermission(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION);
+ doneDirFc.mkdir(path, fsp, true);
+ FileStatus fsStatus = doneDirFc.getFileStatus(path);
+ LOG.info("Perms after creating " + fsStatus.getPermission().toShort()
+ + ", Expected: " + fsp.toShort());
+ if (fsStatus.getPermission().toShort() != fsp.toShort()) {
+ LOG.info("Explicitly setting permissions to : " + fsp.toShort()
+ + ", " + fsp);
+ doneDirFc.setPermission(path, fsp);
+ }
+ synchronized(existingDoneSubdirs) {
+ existingDoneSubdirs.add(path);
+ }
+ } catch (FileAlreadyExistsException faeE) { //Nothing to do.
+ }
+ }
+ }
+
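+  // For the resulting layout, see the class comment above:
+  // ${DONE_DIR}/VERSION_STRING/YYYY/MM/DD/HH/SERIAL_NUM/... -- the exact
+  // subdirectory string is delegated to JobHistoryUtils.historyLogSubdirectory().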
+ private Path canonicalHistoryLogPath(JobId id, String timestampComponent) {
+ return new Path(doneDirPrefixPath, JobHistoryUtils.historyLogSubdirectory(id, timestampComponent, serialNumberFormat));
+ }
+
+ private Path canonicalHistoryLogPath(JobId id, long millisecondTime) {
+ String timestampComponent = JobHistoryUtils.timestampDirectoryComponent(millisecondTime, debugMode);
+ return new Path(doneDirPrefixPath, JobHistoryUtils.historyLogSubdirectory(id, timestampComponent, serialNumberFormat));
+ }
+
+
+ @Override
+ public synchronized Job getJob(JobId jobId) {
+ Job job = null;
+ try {
+ job = findJob(jobId);
+ //This could return a null job.
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ return job;
+ }
+
+ @Override
+ public Map<JobId, Job> getAllJobs(ApplicationId appID) {
+ LOG.info("Called getAllJobs(AppId): " + appID);
+    // Currently there is a 1:1 mapping between application and job ids.
+ org.apache.hadoop.mapreduce.JobID oldJobID = TypeConverter.fromYarn(appID);
+ Map<JobId, Job> jobs = new HashMap<JobId, Job>();
+ JobId jobID = TypeConverter.toYarn(oldJobID);
+ jobs.put(jobID, getJob(jobID));
+ return jobs;
+// return getAllJobs();
+ }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.mapreduce.v2.hs.HistoryContext#getAllJobs()
+   *
+   * Returns a recent list of jobs. This may not be the complete set.
+   * If a previous jobId is known - it can be queried via the getJob(JobId)
+   * method.
+   * Size of this list is determined by the size of the job list cache.
+   * This can be fixed when pagination is implemented - return the first set of
+   * jobs via the cache, go to DFS only when an attempt is made to navigate
+   * past the cached list.
+   * This does involve a DFS operation of scanning the intermediate directory.
+   */
+ public Map<JobId, Job> getAllJobs() {
+ return getAllJobsInternal();
+ }
+
+ static class MetaInfo {
+ private Path historyFile;
+ private Path confFile;
+ private Path summaryFile;
+ JobIndexInfo jobIndexInfo;
+
+ MetaInfo(Path historyFile, Path confFile, Path summaryFile, JobIndexInfo jobIndexInfo) {
+ this.historyFile = historyFile;
+ this.confFile = confFile;
+ this.summaryFile = summaryFile;
+ this.jobIndexInfo = jobIndexInfo;
+ }
+
+ Path getHistoryFile() { return historyFile; }
+ Path getConfFile() { return confFile; }
+ Path getSummaryFile() { return summaryFile; }
+ JobIndexInfo getJobIndexInfo() { return jobIndexInfo; }
+
+ void setHistoryFile(Path historyFile) { this.historyFile = historyFile; }
+ void setConfFile(Path confFile) {this.confFile = confFile; }
+ void setSummaryFile(Path summaryFile) { this.summaryFile = summaryFile; }
+ }
+
+
+ public class HistoryCleaner implements Runnable {
+ private long currentTime;
+
+ long maxAgeMillis;
+ long filesDeleted = 0;
+ long dirsDeleted = 0;
+
+ public HistoryCleaner(long maxAge) {
+ this.maxAgeMillis = maxAge;
+ }
+
+ @SuppressWarnings("unchecked")
+ public void run() {
+ LOG.info("History Cleaner started");
+ currentTime = System.currentTimeMillis();
+ boolean halted = false;
+ //TODO Delete YYYY/MM/DD directories.
+ try {
+ List<FileStatus> serialDirList = findTimestampedDirectories();
+ //Sort in ascending order. Relies on YYYY/MM/DD/Serial
+ Collections.sort(serialDirList);
+ for (FileStatus serialDir : serialDirList) {
+ List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(serialDir.getPath(), doneDirFc);
+ for (FileStatus historyFile : historyFileList) {
+ JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(historyFile.getPath().getName());
+ long effectiveTimestamp = getEffectiveTimestamp(jobIndexInfo.getFinishTime(), historyFile);
+ if (shouldDelete(effectiveTimestamp)) {
+ String confFileName = JobHistoryUtils.getIntermediateConfFileName(jobIndexInfo.getJobId());
+ MetaInfo metaInfo = new MetaInfo(historyFile.getPath(), new Path(historyFile.getPath().getParent(), confFileName), null, jobIndexInfo);
+ delete(metaInfo);
+ } else {
+ halted = true;
+ break;
+ }
+ }
+ if (!halted) {
+ deleteDir(serialDir.getPath());
+ removeDirectoryFromSerialNumberIndex(serialDir.getPath());
+ synchronized (existingDoneSubdirs) {
+ existingDoneSubdirs.remove(serialDir.getPath());
+ }
+
+ } else {
+ break; //Don't scan any more directories.
+ }
+ }
+ } catch (IOException e) {
+ LOG.warn("Error in History cleaner run", e);
+ }
+ LOG.info("History Cleaner complete");
+ LOG.info("FilesDeleted: " + filesDeleted);
+ LOG.info("Directories Deleted: " + dirsDeleted);
+ }
+
+ private boolean shouldDelete(long ts) {
+ return ((ts + maxAgeMillis) <= currentTime);
+ }
+
+ private long getEffectiveTimestamp(long finishTime, FileStatus fileStatus) {
+ if (finishTime == 0) {
+ return fileStatus.getModificationTime();
+ }
+ return finishTime;
+ }
+
+ private void delete(MetaInfo metaInfo) throws IOException {
+ deleteFile(metaInfo.getHistoryFile());
+ deleteFile(metaInfo.getConfFile());
+ jobListCache.remove(metaInfo.getJobIndexInfo().getJobId());
+ loadedJobCache.remove(metaInfo.getJobIndexInfo().getJobId());
+ }
+
+ private void deleteFile(final Path path) throws IOException {
+ doneDirFc.delete(doneDirFc.makeQualified(path), false);
+ filesDeleted++;
+ }
+
+ private void deleteDir(Path path) throws IOException {
+ doneDirFc.delete(doneDirFc.makeQualified(path), true);
+ dirsDeleted++;
+ }
+ }
+
+ //TODO AppContext - Not Required
+ private ApplicationAttemptId appAttemptID;
+ @Override
+ public ApplicationAttemptId getApplicationAttemptId() {
+ //TODO fixme - bogus appAttemptID for now
+ return appAttemptID;
+ }
+
+ //TODO AppContext - Not Required
+ private ApplicationId appID;
+ @Override
+ public ApplicationId getApplicationID() {
+ //TODO fixme - bogus appID for now
+ return appID;
+ }
+
+ //TODO AppContext - Not Required
+ @Override
+ public EventHandler getEventHandler() {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ //TODO AppContext - Not Required
+ private String userName;
+ @Override
+ public CharSequence getUser() {
+    if (userName == null) {
+      //Lazily resolve the user name on first access.
+      userName = conf.get(MRJobConfig.USER_NAME, "history-user");
+    }
+ return userName;
+ }
+
+ //TODO AppContext - Not Required
+ @Override
+ public Clock getClock() {
+ return null;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
new file mode 100644
index 0000000..03bf3a4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
@@ -0,0 +1,88 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.service.CompositeService;
+
+/******************************************************************
+ * {@link JobHistoryServer} is responsible for servicing all job history
+ * related requests from clients.
+ *
+ *****************************************************************/
+public class JobHistoryServer extends CompositeService {
+ private static final Log LOG = LogFactory.getLog(JobHistoryServer.class);
+ private HistoryContext historyContext;
+ private HistoryClientService clientService;
+ private JobHistory jobHistoryService;
+
+  static {
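+    // Load the classic MapReduce configuration resources so that existing
+    // mapred-site settings also apply to the history server.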
+ Configuration.addDefaultResource("mapred-default.xml");
+ Configuration.addDefaultResource("mapred-site.xml");
+ }
+
+ public JobHistoryServer() {
+ super(JobHistoryServer.class.getName());
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+ Configuration config = new YarnConfiguration(conf);
+ try {
+ doSecureLogin(conf);
+ } catch(IOException ie) {
+ throw new YarnException("History Server Failed to login", ie);
+ }
+ jobHistoryService = new JobHistory();
+ historyContext = (HistoryContext)jobHistoryService;
+ clientService = new HistoryClientService(historyContext);
+ addService(jobHistoryService);
+ addService(clientService);
+ super.init(config);
+ }
+
+ protected void doSecureLogin(Configuration conf) throws IOException {
+ SecurityUtil.login(conf, JHConfig.HS_KEYTAB_KEY,
+ JHConfig.HS_SERVER_PRINCIPAL_KEY);
+ }
+
+ public static void main(String[] args) {
+ StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
+ JobHistoryServer server = null;
+ try {
+ server = new JobHistoryServer();
+ YarnConfiguration conf = new YarnConfiguration(new JobConf());
+ server.init(conf);
+ server.start();
+ } catch (Throwable e) {
+ LOG.fatal(StringUtils.stringifyException(e));
+ System.exit(-1);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
new file mode 100644
index 0000000..f0393e2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
@@ -0,0 +1,143 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+import org.apache.log4j.Logger;
+
+public class PartialJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
+
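+  // A lightweight Job view backed only by what the history file name encodes
+  // (via JobIndexInfo). Task, counter and diagnostic accessors return null.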
+ private JobIndexInfo jobIndexInfo = null;
+ private JobId jobId = null;
+ private JobReport jobReport = null;
+
+ public PartialJob(JobIndexInfo jobIndexInfo, JobId jobId) {
+ this.jobIndexInfo = jobIndexInfo;
+ this.jobId = jobId;
+ jobReport = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobReport.class);
+ }
+
+ @Override
+ public JobId getID() {
+// return jobIndexInfo.getJobId();
+ return this.jobId;
+ }
+
+ @Override
+ public String getName() {
+ return jobIndexInfo.getJobName();
+ }
+
+ @Override
+ public JobState getState() {
+ JobState js = null;
+ try {
+ js = JobState.valueOf(jobIndexInfo.getJobStatus());
+    } catch (Exception e) {
+      // Meant for use by the display UI. An exception would prevent the page
+      // from being rendered. Defaulting to KILLED.
+      Logger.getLogger(this.getClass().getName()).warn(
+          "Exception while parsing job state. Defaulting to KILLED", e);
+ js = JobState.KILLED;
+ }
+ return js;
+ }
+
+ @Override
+ public JobReport getReport() {
+ return jobReport;
+ }
+
+ @Override
+ public Counters getCounters() {
+ return null;
+ }
+
+ @Override
+ public Map<TaskId, Task> getTasks() {
+ return null;
+ }
+
+ @Override
+ public Map<TaskId, Task> getTasks(TaskType taskType) {
+ return null;
+ }
+
+ @Override
+ public Task getTask(TaskId taskID) {
+ return null;
+ }
+
+ @Override
+ public List<String> getDiagnostics() {
+ return null;
+ }
+
+ @Override
+ public int getTotalMaps() {
+ return jobIndexInfo.getNumMaps();
+ }
+
+ @Override
+ public int getTotalReduces() {
+ return jobIndexInfo.getNumReduces();
+ }
+
+ @Override
+ public int getCompletedMaps() {
+ return jobIndexInfo.getNumMaps();
+ }
+
+ @Override
+ public int getCompletedReduces() {
+ return jobIndexInfo.getNumReduces();
+ }
+
+ @Override
+ public boolean isUber() {
+ return false;
+ }
+
+ @Override
+ public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
+ int fromEventId, int maxEvents) {
+ return null;
+ }
+
+ @Override
+ public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
+ return false;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HSWebApp.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HSWebApp.java
new file mode 100644
index 0000000..83f5c4e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HSWebApp.java
@@ -0,0 +1,49 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
+
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.webapp.AMParams;
+import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
+import org.apache.hadoop.yarn.webapp.WebApp;
+
+public class HSWebApp extends WebApp implements AMParams {
+
+ private HistoryContext history;
+
+ public HSWebApp(HistoryContext history) {
+ this.history = history;
+ }
+
+ @Override
+ public void setup() {
+ bind(AppContext.class).toInstance(history);
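+    // Wire each URL template to an HsController action; pajoin joins the
+    // named parameters (e.g. JOB_ID) onto the route path as placeholders.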
+ route("/", HsController.class);
+ route("/app", HsController.class);
+ route(pajoin("/job", JOB_ID), HsController.class, "job");
+ route(pajoin("/jobcounters", JOB_ID), HsController.class, "jobCounters");
+ route(pajoin("/tasks", JOB_ID, TASK_TYPE), HsController.class, "tasks");
+ route(pajoin("/attempts", JOB_ID, TASK_TYPE, ATTEMPT_STATE),
+ HsController.class, "attempts");
+ route(pajoin("/task", TASK_ID), HsController.class, "task");
+ }
+}
+
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsController.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsController.java
new file mode 100644
index 0000000..fab3008
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsController.java
@@ -0,0 +1,61 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs.webapp;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.app.webapp.App;
+import org.apache.hadoop.mapreduce.v2.app.webapp.AppController;
+import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
+
+import com.google.inject.Inject;
+
+public class HsController extends AppController {
+
+ @Inject HsController(App app, Configuration conf, RequestContext ctx) {
+ super(app, conf, ctx, "History");
+ }
+
+ @Override
+ public void index() {
+ // TODO Auto-generated method stub
+ setTitle("JobHistory");
+ }
+
+ // Need all of these methods here also as Guice doesn't look into parent
+ // classes.
+ public void job() {
+ super.job();
+ }
+
+ public void jobCounters() {
+ super.jobCounters();
+ }
+
+ public void tasks() {
+ super.tasks();
+ }
+
+ public void task() {
+ super.task();
+ }
+
+ @Override
+ public void attempts() {
+ super.attempts();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
new file mode 100644
index 0000000..0805698
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs.webapp;
+
+import org.apache.hadoop.mapreduce.v2.app.webapp.JobsBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class HsView extends TwoColumnLayout {
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ set(DATATABLES_ID, "jobs");
+ set(initID(DATATABLES, "jobs"), jobsTableInit());
+ setTableStyles(html, "jobs");
+ }
+
+ protected void commonPreHead(Page.HTML<_> html) {
+ //html.meta_http("refresh", "10");
+ set(ACCORDION_ID, "nav");
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
+ set(THEMESWITCHER_ID, "themeswitcher");
+ }
+
+ /*
+ * (non-Javadoc)
+ * @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#nav()
+   */
+ @Override
+ protected Class<? extends SubView> nav() {
+ return org.apache.hadoop.mapreduce.v2.app.webapp.NavBlock.class;
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return JobsBlock.class;
+ }
+
+ private String jobsTableInit() {
+ return tableInit().
+ append(",aoColumns:[{sType:'title-numeric'},").
+ append("null,null,{sType:'title-numeric', bSearchable:false},null,").
+ append("null,{sType:'title-numeric',bSearchable:false}, null, null]}").
+ toString();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java
new file mode 100644
index 0000000..de4a087
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java
@@ -0,0 +1,129 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.util.Map;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MRApp;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.service.Service;
+import org.junit.Test;
+
+public class TestJobHistoryEvents {
+ private static final Log LOG = LogFactory.getLog(TestJobHistoryEvents.class);
+
+ @Test
+ public void testHistoryEvents() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set(MRJobConfig.USER_NAME, "test");
+ MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(), true);
+ app.submit(conf);
+ Job job = app.getContext().getAllJobs().values().iterator().next();
+ JobId jobId = job.getID();
+ LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
+ app.waitForState(job, JobState.SUCCEEDED);
+
+ //make sure all events are flushed
+ app.waitForState(Service.STATE.STOPPED);
+ /*
+ * Use HistoryContext to read logged events and verify the number of
+ * completed maps
+ */
+ HistoryContext context = new JobHistory();
+ ((JobHistory)context).init(conf);
+ Job parsedJob = context.getJob(jobId);
+ Assert.assertEquals("CompletedMaps not correct", 2,
+ parsedJob.getCompletedMaps());
+
+ Map<TaskId, Task> tasks = parsedJob.getTasks();
+ Assert.assertEquals("No of tasks not correct", 3, tasks.size());
+ for (Task task : tasks.values()) {
+ verifyTask(task);
+ }
+
+ Map<TaskId, Task> maps = parsedJob.getTasks(TaskType.MAP);
+ Assert.assertEquals("No of maps not correct", 2, maps.size());
+
+ Map<TaskId, Task> reduces = parsedJob.getTasks(TaskType.REDUCE);
+ Assert.assertEquals("No of reduces not correct", 1, reduces.size());
+
+ Assert.assertEquals("CompletedReduce not correct", 1,
+ parsedJob.getCompletedReduces());
+
+ Assert.assertEquals("Job state not currect", JobState.SUCCEEDED,
+ parsedJob.getState());
+ }
+
+ private void verifyTask(Task task) {
+ Assert.assertEquals("Task state not currect", TaskState.SUCCEEDED,
+ task.getState());
+ Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
+ Assert.assertEquals("No of attempts not correct", 1, attempts.size());
+ for (TaskAttempt attempt : attempts.values()) {
+ verifyAttempt(attempt);
+ }
+ }
+
+ private void verifyAttempt(TaskAttempt attempt) {
+ Assert.assertEquals("TaskAttempt state not currect",
+ TaskAttemptState.SUCCEEDED, attempt.getState());
+ }
+
+ static class MRAppWithHistory extends MRApp {
+ public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
+ String testName, boolean cleanOnStart) {
+ super(maps, reduces, autoComplete, testName, cleanOnStart);
+ }
+
+ @Override
+ protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
+ AppContext context) {
+ JobHistoryEventHandler eventHandler = new JobHistoryEventHandler(context,
+ getStartCount());
+ return eventHandler;
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestJobHistoryEvents t = new TestJobHistoryEvents();
+ t.testHistoryEvents();
+ }
+}
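A note on the shape of this test: MRAppWithHistory overrides createJobHistoryHandler() so that the in-JVM MRApp run writes real history events through JobHistoryEventHandler (the base MRApp presumably stubs this out), and the assertions then read everything back through the JobHistory implementation of HistoryContext. TestJobHistoryParsing below reuses the same fixture to exercise the file-level parser.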
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
new file mode 100644
index 0000000..bc85066
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
@@ -0,0 +1,152 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.app.MRApp;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory;
+import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
+import org.apache.hadoop.yarn.service.Service;
+import org.junit.Test;
+
+public class TestJobHistoryParsing {
+  private static final Log LOG = LogFactory.getLog(TestJobHistoryParsing.class);
+
+ @Test
+ public void testHistoryParsing() throws Exception {
+ Configuration conf = new Configuration();
+ MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(), true);
+ app.submit(conf);
+ Job job = app.getContext().getAllJobs().values().iterator().next();
+ JobId jobId = job.getID();
+ LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
+ app.waitForState(job, JobState.SUCCEEDED);
+
+ //make sure all events are flushed
+ app.waitForState(Service.STATE.STOPPED);
+
+ String jobhistoryDir = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
+ JobHistory jobHistory = new JobHistory();
+ jobHistory.init(conf);
+
+ JobIndexInfo jobIndexInfo = jobHistory.getJobMetaInfo(jobId).getJobIndexInfo();
+ String jobhistoryFileName = FileNameIndexUtils.getDoneFileName(jobIndexInfo);
+
+ Path historyFilePath = new Path(jobhistoryDir, jobhistoryFileName);
+ FSDataInputStream in = null;
+ LOG.info("JobHistoryFile is: " + historyFilePath);
+ FileContext fc = null;
+ try {
+ fc = FileContext.getFileContext(conf);
+ in = fc.open(fc.makeQualified(historyFilePath));
+    } catch (IOException ioe) {
+      LOG.info("Cannot open history file: " + historyFilePath, ioe);
+      throw new Exception("Cannot open history file: " + historyFilePath, ioe);
+    }
+
+ JobHistoryParser parser = new JobHistoryParser(in);
+ JobInfo jobInfo = parser.parse();
+
+ Assert.assertTrue ("Incorrect username ",
+ jobInfo.getUsername().equals("mapred"));
+ Assert.assertTrue("Incorrect jobName ",
+ jobInfo.getJobname().equals("test"));
+ Assert.assertTrue("Incorrect queuename ",
+ jobInfo.getJobQueueName().equals("default"));
+ Assert.assertTrue("incorrect conf path",
+ jobInfo.getJobConfPath().equals("test"));
+ Assert.assertTrue("incorrect finishedMap ",
+ jobInfo.getFinishedMaps() == 2);
+ Assert.assertTrue("incorrect finishedReduces ",
+ jobInfo.getFinishedReduces() == 1);
+ int totalTasks = jobInfo.getAllTasks().size();
+ Assert.assertTrue("total number of tasks is incorrect ", totalTasks == 3);
+
+ //Assert at taskAttempt level
+ for (TaskInfo taskInfo : jobInfo.getAllTasks().values()) {
+      int taskAttemptCount = taskInfo.getAllTaskAttempts().size();
+      Assert.assertEquals("Total number of task attempts is incorrect", 1,
+          taskAttemptCount);
+ }
+
+ String summaryFileName = JobHistoryUtils
+ .getIntermediateSummaryFileName(jobId);
+ Path summaryFile = new Path(jobhistoryDir, summaryFileName);
+ String jobSummaryString = jobHistory.getJobSummary(fc, summaryFile);
+ Assert.assertNotNull(jobSummaryString);
+
+ Map<String, String> jobSummaryElements = new HashMap<String, String>();
+ StringTokenizer strToken = new StringTokenizer(jobSummaryString, ",");
+    while (strToken.hasMoreTokens()) {
+      String keypair = strToken.nextToken();
+      String[] parts = keypair.split("=");
+      jobSummaryElements.put(parts[0], parts[1]);
+    }
+
+ Assert.assertEquals("JobId does not match", jobId.toString(),
+ jobSummaryElements.get("jobId"));
+ Assert.assertTrue("submitTime should not be 0",
+ Long.parseLong(jobSummaryElements.get("submitTime")) != 0);
+ Assert.assertTrue("launchTime should not be 0",
+ Long.parseLong(jobSummaryElements.get("launchTime")) != 0);
+ Assert.assertTrue("firstMapTaskLaunchTime should not be 0",
+ Long.parseLong(jobSummaryElements.get("firstMapTaskLaunchTime")) != 0);
+    Assert.assertTrue("firstReduceTaskLaunchTime should not be 0",
+        Long.parseLong(jobSummaryElements.get("firstReduceTaskLaunchTime")) != 0);
+ Assert.assertTrue("finishTime should not be 0",
+ Long.parseLong(jobSummaryElements.get("finishTime")) != 0);
+ Assert.assertEquals("Mismatch in num map slots", 2,
+ Integer.parseInt(jobSummaryElements.get("numMaps")));
+ Assert.assertEquals("Mismatch in num reduce slots", 1,
+ Integer.parseInt(jobSummaryElements.get("numReduces")));
+ Assert.assertEquals("User does not match", "mapred",
+ jobSummaryElements.get("user"));
+ Assert.assertEquals("Queue does not match", "default",
+ jobSummaryElements.get("queue"));
+ Assert.assertEquals("Status does not match", "SUCCEEDED",
+ jobSummaryElements.get("status"));
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestJobHistoryParsing t = new TestJobHistoryParsing();
+ t.testHistoryParsing();
+ }
+}
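The file-level API exercised above also works standalone against any completed job's history file. A minimal sketch, with imports as in the test and a hypothetical history-file path:

  Configuration conf = new Configuration();
  FileContext fc = FileContext.getFileContext(conf);
  Path historyFile = new Path("/tmp/job-history-file");  // hypothetical path
  FSDataInputStream in = fc.open(fc.makeQualified(historyFile));
  JobHistoryParser parser = new JobHistoryParser(in);
  JobInfo jobInfo = parser.parse();
  System.out.println(jobInfo.getJobname() + ": " + jobInfo.getFinishedMaps()
      + " maps, " + jobInfo.getFinishedReduces() + " reduces");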
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
new file mode 100644
index 0000000..379b2f7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.mapreduce.v2.hs.webapp;
+
+import org.apache.hadoop.yarn.webapp.WebApps;
+
+public class TestHSWebApp {
+ public static void main(String[] args) {
+ WebApps.$for("yarn").at(19888).start().joinThread();
+ }
+}
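TestHSWebApp is a manual harness rather than a JUnit test: running its main() brings the web app up on port 19888 and joins the serving thread, so the pages can be inspected in a browser (http://localhost:19888/ on a default setup).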
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/pom.xml
new file mode 100644
index 0000000..b14ba3e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-mapreduce-client</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${hadoop-mapreduce.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+ <name>hadoop-mapreduce-client-jobclient</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <fork.mode>always</fork.mode>
+ <mr.basedir>${project.parent.parent.basedir}</mr.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-app</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-app</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-hs</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-nodemanager</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-common</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-tests</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-jar-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ <phase>test-compile</phase>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <additionalClasspathElements>
+ <!-- workaround for JobConf#setJarByClass -->
+ <additionalClasspathElement>${project.build.directory}/${project.artifactId}-${project.version}-tests.jar</additionalClasspathElement>
+ </additionalClasspathElements>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
new file mode 100644
index 0000000..2eb755a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -0,0 +1,508 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.security.PrivilegedAction;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
+import org.apache.hadoop.yarn.security.SchedulerSecurityInfo;
+import org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo;
+
+public class ClientServiceDelegate {
+ private static final Log LOG = LogFactory.getLog(ClientServiceDelegate.class);
+
+ private Configuration conf;
+ private ApplicationId currentAppId;
+ private ApplicationState currentAppState = ApplicationState.NEW;
+ private final ResourceMgrDelegate rm;
+ private MRClientProtocol realProxy = null;
+ private String serviceAddr = "";
+ private String serviceHttpAddr = "";
+ private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ ClientServiceDelegate(Configuration conf, ResourceMgrDelegate rm) {
+ this.conf = new Configuration(conf); // Cloning for modifying.
+ // For faster redirects from AM to HS.
+ this.conf.setInt(
+ CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
+ this.rm = rm;
+ }
+
+ private MRClientProtocol getProxy(JobID jobId) throws YarnRemoteException {
+ return getProxy(TypeConverter.toYarn(jobId).getAppId(), false);
+ }
+
+ private MRClientProtocol getRefreshedProxy(JobID jobId) throws YarnRemoteException {
+ return getProxy(TypeConverter.toYarn(jobId).getAppId(), true);
+ }
+
+ private MRClientProtocol getProxy(ApplicationId appId,
+ boolean forceRefresh) throws YarnRemoteException {
+ if (!appId.equals(currentAppId) || forceRefresh || realProxy == null) {
+ currentAppId = appId;
+ refreshProxy();
+ }
+ return realProxy;
+ }
+
+ private void refreshProxy() throws YarnRemoteException {
+ //TODO RM NPEs for unknown jobs. History may still be aware.
+ // Possibly allow nulls through the PB tunnel, otherwise deal with an exception
+ // and redirect to the history server.
+ ApplicationReport application = rm.getApplicationReport(currentAppId);
+ while (ApplicationState.RUNNING.equals(application.getState())) {
+ try {
+ if (application.getHost() == null || "".equals(application.getHost())) {
+ LOG.debug("AM not assigned to Job. Waiting to get the AM ...");
+ Thread.sleep(2000);
+
+ LOG.debug("Application state is " + application.getState());
+ application = rm.getApplicationReport(currentAppId);
+ continue;
+ }
+ serviceAddr = application.getHost() + ":" + application.getRpcPort();
+ serviceHttpAddr = application.getTrackingUrl();
+ currentAppState = application.getState();
+ if (UserGroupInformation.isSecurityEnabled()) {
+ String clientTokenEncoded = application.getClientToken();
+ Token<ApplicationTokenIdentifier> clientToken =
+ new Token<ApplicationTokenIdentifier>();
+ clientToken.decodeFromUrlString(clientTokenEncoded);
+ clientToken.setService(new Text(application.getHost() + ":"
+ + application.getRpcPort()));
+ UserGroupInformation.getCurrentUser().addToken(clientToken);
+ }
+ LOG.info("Connecting to " + serviceAddr);
+ instantiateAMProxy(serviceAddr);
+ return;
+ } catch (Exception e) {
+        // Possibly the AM has crashed; it may take some time before it is
+        // restarted. Keep retrying by getting the AM address from the RM.
+ LOG.info("Could not connect to " + serviceAddr +
+ ". Waiting for getting the latest AM address...");
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e1) {
+ }
+ application = rm.getApplicationReport(currentAppId);
+ }
+ }
+
+ currentAppState = application.getState();
+    /* We just want to return if the application is still allocating, so that
+     * we don't block on it. This makes it possible to return a job status
+     * for an application that is still being allocated.
+     */
+
+ if (currentAppState == ApplicationState.NEW) {
+ realProxy = null;
+ return;
+ }
+
+ if (currentAppState == ApplicationState.SUCCEEDED
+ || currentAppState == ApplicationState.FAILED
+ || currentAppState == ApplicationState.KILLED) {
+ serviceAddr = conf.get(JHConfig.HS_BIND_ADDRESS,
+ JHConfig.DEFAULT_HS_BIND_ADDRESS);
+ LOG.info("Application state is completed. " +
+ "Redirecting to job history server " + serviceAddr);
+ try {
+ serviceHttpAddr = JobHistoryUtils.getHistoryUrl(conf, currentAppId);
+ } catch (UnknownHostException e) {
+ LOG.warn("Unable to get history url", e);
+ serviceHttpAddr = "UNKNOWN";
+ }
+ try {
+ instantiateHistoryProxy(serviceAddr);
+ return;
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+ }
+
+ private void instantiateAMProxy(final String serviceAddr) throws IOException {
+ UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+ LOG.trace("Connecting to ApplicationMaster at: " + serviceAddr);
+ realProxy = currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
+ @Override
+ public MRClientProtocol run() {
+ Configuration myConf = new Configuration(conf);
+ myConf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ SchedulerSecurityInfo.class, SecurityInfo.class);
+ YarnRPC rpc = YarnRPC.create(myConf);
+ return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
+ NetUtils.createSocketAddr(serviceAddr), myConf);
+ }
+ });
+ LOG.trace("Connected to ApplicationMaster at: " + serviceAddr);
+ }
+
+ private void instantiateHistoryProxy(final String serviceAddr)
+ throws IOException {
+ LOG.trace("Connecting to HistoryServer at: " + serviceAddr);
+ Configuration myConf = new Configuration(conf);
+    //TODO This should ideally use its own class (instead of ClientRMSecurityInfo)
+ myConf.setClass(YarnConfiguration.YARN_SECURITY_INFO,
+ ClientRMSecurityInfo.class, SecurityInfo.class);
+ YarnRPC rpc = YarnRPC.create(myConf);
+ realProxy = (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
+ NetUtils.createSocketAddr(serviceAddr), myConf);
+ LOG.trace("Connected to HistoryServer at: " + serviceAddr);
+ }
+
+ public org.apache.hadoop.mapreduce.Counters getJobCounters(JobID arg0) throws IOException,
+ InterruptedException {
+ org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter.toYarn(arg0);
+ try {
+ GetCountersRequest request = recordFactory.newRecordInstance(GetCountersRequest.class);
+ request.setJobId(jobID);
+ MRClientProtocol protocol = getProxy(arg0);
+ if (protocol == null) {
+ /* no AM to connect to, fake counters */
+ return new org.apache.hadoop.mapreduce.Counters();
+ }
+ return TypeConverter.fromYarn(protocol.getCounters(request).getCounters());
+ } catch(YarnRemoteException yre) {//thrown by remote server, no need to redirect
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ } catch(Exception e) {
+ LOG.debug("Failing to contact application master", e);
+ try {
+ GetCountersRequest request = recordFactory.newRecordInstance(GetCountersRequest.class);
+ request.setJobId(jobID);
+ MRClientProtocol protocol = getRefreshedProxy(arg0);
+ if (protocol == null) {
+ /* no History to connect to, fake counters */
+ return new org.apache.hadoop.mapreduce.Counters();
+ }
+ return TypeConverter.fromYarn(protocol.getCounters(request).getCounters());
+ } catch(YarnRemoteException yre) {
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ }
+ }
+ }
+
+ public String getJobHistoryDir() throws IOException, InterruptedException {
+ return JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
+ }
+
+ public TaskCompletionEvent[] getTaskCompletionEvents(JobID arg0, int arg1,
+ int arg2) throws IOException, InterruptedException {
+ org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter.toYarn(arg0);
+ List<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent> list = null;
+ GetTaskAttemptCompletionEventsRequest request = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
+ MRClientProtocol protocol;
+ try {
+ request.setJobId(jobID);
+ request.setFromEventId(arg1);
+ request.setMaxEvents(arg2);
+ protocol = getProxy(arg0);
+      /* This is a hack to get around the issue of faking job status while
+       * the AM is coming up.
+       */
+ if (protocol == null) {
+ return new TaskCompletionEvent[0];
+ }
+      list = protocol.getTaskAttemptCompletionEvents(request).getCompletionEventList();
+ } catch(YarnRemoteException yre) {//thrown by remote server, no need to redirect
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ } catch(Exception e) {
+ LOG.debug("Failed to contact application master ", e);
+ try {
+ request.setJobId(jobID);
+ request.setFromEventId(arg1);
+ request.setMaxEvents(arg2);
+ protocol = getRefreshedProxy(arg0);
+ if (protocol == null) {
+ return new TaskCompletionEvent[0];
+ }
+ list = protocol.getTaskAttemptCompletionEvents(request).getCompletionEventList();
+ } catch(YarnRemoteException yre) {
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ }
+ }
+ return TypeConverter.fromYarn(
+ list.toArray(new org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[0]));
+ }
+
+  public String[] getTaskDiagnostics(
+      org.apache.hadoop.mapreduce.TaskAttemptID arg0)
+      throws IOException, InterruptedException {
+
+ List<String> list = null;
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(arg0);
+ GetDiagnosticsRequest request = recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
+ MRClientProtocol protocol;
+ try {
+ request.setTaskAttemptId(attemptID);
+ protocol = getProxy(arg0.getJobID());
+ if (protocol == null) {
+ return new String[0];
+ }
+      list = protocol.getDiagnostics(request).getDiagnosticsList();
+ } catch(YarnRemoteException yre) {//thrown by remote server, no need to redirect
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ } catch(Exception e) {
+ LOG.debug("Failed to contact application master ", e);
+ try {
+ protocol = getRefreshedProxy(arg0.getJobID());
+ if (protocol == null) {
+ return new String[0];
+ }
+ list = protocol.getDiagnostics(request).getDiagnosticsList();
+ } catch(YarnRemoteException yre) {
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ }
+ }
+    return list.toArray(new String[list.size()]);
+ }
+
+ private JobStatus createFakeJobReport(ApplicationState state,
+ org.apache.hadoop.mapreduce.v2.api.records.JobId jobId, String jobFile) {
+ JobReport jobreport = recordFactory.newRecordInstance(JobReport.class);
+ jobreport.setCleanupProgress(0);
+ jobreport.setFinishTime(0);
+ jobreport.setJobId(jobId);
+ jobreport.setMapProgress(0);
+    // TODO: set the real job start time instead of 0.
+ jobreport.setStartTime(0);
+ jobreport.setReduceProgress(0);
+ jobreport.setSetupProgress(0);
+
+ if (currentAppState == ApplicationState.NEW) {
+      /* The protocol wasn't instantiated because the application wasn't
+       * launched; return a fake report.
+       */
+ jobreport.setJobState(JobState.NEW);
+ } else if (currentAppState == ApplicationState.SUBMITTED) {
+ jobreport.setJobState(JobState.NEW);
+ } else if (currentAppState == ApplicationState.KILLED) {
+ jobreport.setJobState(JobState.KILLED);
+ } else if (currentAppState == ApplicationState.FAILED) {
+ jobreport.setJobState(JobState.FAILED);
+ }
+ return TypeConverter.fromYarn(jobreport, jobFile, serviceHttpAddr);
+ }
+
+  public JobStatus getJobStatus(JobID oldJobID) throws YarnRemoteException {
+ org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
+ TypeConverter.toYarn(oldJobID);
+ String stagingDir = conf.get("yarn.apps.stagingDir");
+ String jobFile = stagingDir + "/" + jobId.toString();
+ JobReport report = null;
+ MRClientProtocol protocol;
+ GetJobReportRequest request = recordFactory.newRecordInstance(GetJobReportRequest.class);
+ try {
+ request.setJobId(jobId);
+ protocol = getProxy(oldJobID);
+
+ if (protocol == null) {
+ return createFakeJobReport(currentAppState, jobId, jobFile);
+ }
+      report = protocol.getJobReport(request).getJobReport();
+ } catch(YarnRemoteException yre) {//thrown by remote server, no need to redirect
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ } catch (Exception e) {
+ try {
+ request.setJobId(jobId);
+ protocol = getRefreshedProxy(oldJobID);
+ /* this is possible if an application that was running is killed */
+ if (protocol == null) {
+ return createFakeJobReport(currentAppState, jobId, jobFile);
+ }
+ report = protocol.getJobReport(request).getJobReport();
+ } catch(YarnRemoteException yre) {
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ }
+ }
+ return TypeConverter.fromYarn(report, jobFile, serviceHttpAddr);
+ }
+
+  public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID jobID,
+      TaskType taskType) throws YarnRemoteException {
+ List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports = null;
+ org.apache.hadoop.mapreduce.v2.api.records.JobId nJobID = TypeConverter.toYarn(jobID);
+ GetTaskReportsRequest request = recordFactory.newRecordInstance(GetTaskReportsRequest.class);
+ MRClientProtocol protocol = null;
+ try {
+ request.setJobId(nJobID);
+ request.setTaskType(TypeConverter.toYarn(taskType));
+ protocol = getProxy(jobID);
+ if (protocol == null) {
+ return new org.apache.hadoop.mapreduce.TaskReport[0];
+ }
+      taskReports = protocol.getTaskReports(request).getTaskReportList();
+ } catch(YarnRemoteException yre) {//thrown by remote server, no need to redirect
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ } catch(Exception e) {
+ LOG.debug("Failed to contact application master ", e);
+ try {
+ request.setJobId(nJobID);
+ request.setTaskType(TypeConverter.toYarn(taskType));
+ protocol = getRefreshedProxy(jobID);
+ if (protocol == null) {
+ return new org.apache.hadoop.mapreduce.TaskReport[0];
+ }
+ taskReports = protocol.getTaskReports(request).getTaskReportList();
+ } catch(YarnRemoteException yre) {
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ }
+ }
+    return TypeConverter.fromYarn(taskReports)
+        .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
+ }
+
+ public boolean killTask(TaskAttemptID taskAttemptID, boolean fail)
+ throws YarnRemoteException {
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID
+ = TypeConverter.toYarn(taskAttemptID);
+ KillTaskAttemptRequest killRequest = recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
+ FailTaskAttemptRequest failRequest = recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
+ MRClientProtocol protocol = getProxy(taskAttemptID.getJobID());
+ if (protocol == null) {
+ return false;
+ }
+ try {
+ if (fail) {
+ failRequest.setTaskAttemptId(attemptID);
+        protocol.failTaskAttempt(failRequest);
+ } else {
+ killRequest.setTaskAttemptId(attemptID);
+        protocol.killTaskAttempt(killRequest);
+ }
+ } catch(YarnRemoteException yre) {//thrown by remote server, no need to redirect
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ } catch(Exception e) {
+ LOG.debug("Failed to contact application master ", e);
+ MRClientProtocol proxy = getRefreshedProxy(taskAttemptID.getJobID());
+ if (proxy == null) {
+ return false;
+ }
+ try {
+ if (fail) {
+ failRequest.setTaskAttemptId(attemptID);
+ proxy.failTaskAttempt(failRequest);
+ } else {
+ killRequest.setTaskAttemptId(attemptID);
+ proxy.killTaskAttempt(killRequest);
+ }
+ } catch(YarnRemoteException yre) {
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ }
+ }
+ return true;
+ }
+
+ public boolean killJob(JobID oldJobID)
+ throws YarnRemoteException {
+ org.apache.hadoop.mapreduce.v2.api.records.JobId jobId
+ = TypeConverter.toYarn(oldJobID);
+ KillJobRequest killRequest = recordFactory.newRecordInstance(KillJobRequest.class);
+ MRClientProtocol protocol = getProxy(oldJobID);
+ if (protocol == null) {
+ return false;
+ }
+ try {
+ killRequest.setJobId(jobId);
+ protocol.killJob(killRequest);
+ return true;
+ } catch(YarnRemoteException yre) {//thrown by remote server, no need to redirect
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ } catch(Exception e) {
+      // Not really required - if this is always the history context.
+ LOG.debug("Failed to contact application master ", e);
+ MRClientProtocol proxy = getRefreshedProxy(oldJobID);
+ if (proxy == null) {
+ return false;
+ }
+ try {
+ killRequest.setJobId(jobId);
+        proxy.killJob(killRequest);
+ return true;
+ } catch(YarnRemoteException yre) {
+ LOG.warn(RPCUtil.toString(yre));
+ throw yre;
+ }
+ }
+ }
+}
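Every RPC in ClientServiceDelegate repeats the same failover idiom: try the cached proxy (AM or history server), rethrow server-side YarnRemoteExceptions as-is, and on any other failure refresh the proxy via the RM and retry once. A possible consolidation of that idiom, sketched with a hypothetical ProtocolCall callback; none of this is in the patch:

  private interface ProtocolCall<T> {
    T call(MRClientProtocol protocol) throws Exception;
  }

  private <T> T callWithRetry(JobID jobId, T fallback, ProtocolCall<T> op)
      throws YarnRemoteException {
    try {
      MRClientProtocol protocol = getProxy(jobId);
      if (protocol == null) {
        return fallback;                 // no AM or HS to talk to yet: fake it
      }
      return op.call(protocol);
    } catch (YarnRemoteException yre) {  // thrown by the server; don't redirect
      LOG.warn(RPCUtil.toString(yre));
      throw yre;
    } catch (Exception e) {              // transport failure: the AM may have moved
      LOG.debug("Failed to contact application master", e);
      MRClientProtocol protocol = getRefreshedProxy(jobId);
      if (protocol == null) {
        return fallback;
      }
      try {
        return op.call(protocol);
      } catch (YarnRemoteException yre) {
        LOG.warn(RPCUtil.toString(yre));
        throw yre;
      } catch (Exception e2) {
        throw new YarnException(e2);     // unchecked, as used elsewhere in this class
      }
    }
  }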
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
new file mode 100644
index 0000000..0459009
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -0,0 +1,320 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.QueueAclsInfo;
+import org.apache.hadoop.mapreduce.QueueInfo;
+import org.apache.hadoop.mapreduce.TaskTrackerInfo;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo;
+
+
+// TODO: This should be part of something like yarn-client.
+public class ResourceMgrDelegate {
+ private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class);
+
+ private Configuration conf;
+ ClientRMProtocol applicationsManager;
+ private ApplicationId applicationId;
+ private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ public ResourceMgrDelegate(Configuration conf) {
+ this.conf = conf;
+ YarnRPC rpc = YarnRPC.create(conf);
+ InetSocketAddress rmAddress =
+ NetUtils.createSocketAddr(conf.get(
+ YarnConfiguration.APPSMANAGER_ADDRESS,
+ YarnConfiguration.DEFAULT_APPSMANAGER_BIND_ADDRESS));
+ LOG.info("Connecting to ResourceManager at " + rmAddress);
+ Configuration appsManagerServerConf = new Configuration(this.conf);
+ appsManagerServerConf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ ClientRMSecurityInfo.class, SecurityInfo.class);
+ applicationsManager =
+ (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class,
+ rmAddress, appsManagerServerConf);
+ LOG.info("Connected to ResourceManager at " + rmAddress);
+ }
+
+ public void cancelDelegationToken(Token<DelegationTokenIdentifier> arg0)
+ throws IOException, InterruptedException {
+ return;
+ }
+
+
+ public TaskTrackerInfo[] getActiveTrackers() throws IOException,
+ InterruptedException {
+ GetClusterNodesRequest request =
+ recordFactory.newRecordInstance(GetClusterNodesRequest.class);
+ GetClusterNodesResponse response =
+ applicationsManager.getClusterNodes(request);
+ return TypeConverter.fromYarnNodes(response.getNodeReports());
+ }
+
+
+ public JobStatus[] getAllJobs() throws IOException, InterruptedException {
+ GetAllApplicationsRequest request =
+ recordFactory.newRecordInstance(GetAllApplicationsRequest.class);
+ GetAllApplicationsResponse response =
+ applicationsManager.getAllApplications(request);
+ return TypeConverter.fromYarnApps(response.getApplicationList());
+ }
+
+
+ public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
+ InterruptedException {
+ // TODO: Implement getBlacklistedTrackers
+ LOG.warn("getBlacklistedTrackers - Not implemented yet");
+ return new TaskTrackerInfo[0];
+ }
+
+
+ public ClusterMetrics getClusterMetrics() throws IOException,
+ InterruptedException {
+ GetClusterMetricsRequest request = recordFactory.newRecordInstance(GetClusterMetricsRequest.class);
+ GetClusterMetricsResponse response = applicationsManager.getClusterMetrics(request);
+ YarnClusterMetrics metrics = response.getClusterMetrics();
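+    // TODO: most of these values are placeholders; the map/reduce slot totals
+    // below are extrapolated from the NodeManager count.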
+ ClusterMetrics oldMetrics = new ClusterMetrics(1, 1, 1, 1, 1, 1,
+ metrics.getNumNodeManagers() * 10, metrics.getNumNodeManagers() * 2, 1,
+ metrics.getNumNodeManagers(), 0, 0);
+ return oldMetrics;
+ }
+
+
+ public Token<DelegationTokenIdentifier> getDelegationToken(Text arg0)
+ throws IOException, InterruptedException {
+ // TODO: Implement getDelegationToken
+ LOG.warn("getDelegationToken - Not Implemented");
+ return null;
+ }
+
+
+ public String getFilesystemName() throws IOException, InterruptedException {
+ return FileSystem.get(conf).getUri().toString();
+ }
+
+ public JobID getNewJobID() throws IOException, InterruptedException {
+ GetNewApplicationIdRequest request = recordFactory.newRecordInstance(GetNewApplicationIdRequest.class);
+ applicationId = applicationsManager.getNewApplicationId(request).getApplicationId();
+ return TypeConverter.fromYarn(applicationId);
+ }
+
+ private static final String ROOT = "root";
+
+ private GetQueueInfoRequest getQueueInfoRequest(String queueName,
+ boolean includeApplications, boolean includeChildQueues, boolean recursive) {
+ GetQueueInfoRequest request =
+ recordFactory.newRecordInstance(GetQueueInfoRequest.class);
+ request.setQueueName(queueName);
+ request.setIncludeApplications(includeApplications);
+ request.setIncludeChildQueues(includeChildQueues);
+ request.setRecursive(recursive);
+    return request;
+  }
+
+ public QueueInfo getQueue(String queueName) throws IOException,
+ InterruptedException {
+ GetQueueInfoRequest request =
+ getQueueInfoRequest(queueName, true, false, false);
+ return TypeConverter.fromYarn(
+ applicationsManager.getQueueInfo(request).getQueueInfo());
+ }
+
+ private void getChildQueues(org.apache.hadoop.yarn.api.records.QueueInfo parent,
+ List<org.apache.hadoop.yarn.api.records.QueueInfo> queues) {
+ List<org.apache.hadoop.yarn.api.records.QueueInfo> childQueues =
+ parent.getChildQueues();
+
+ for (org.apache.hadoop.yarn.api.records.QueueInfo child : childQueues) {
+ queues.add(child);
+ getChildQueues(child, queues);
+ }
+ }
+
+
+ public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException,
+ InterruptedException {
+ GetQueueUserAclsInfoRequest request =
+ recordFactory.newRecordInstance(GetQueueUserAclsInfoRequest.class);
+ List<QueueUserACLInfo> userAcls =
+ applicationsManager.getQueueUserAcls(request).getUserAclsInfoList();
+ return TypeConverter.fromYarnQueueUserAclsInfo(userAcls);
+ }
+
+
+ public QueueInfo[] getQueues() throws IOException, InterruptedException {
+ List<org.apache.hadoop.yarn.api.records.QueueInfo> queues =
+ new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
+
+ org.apache.hadoop.yarn.api.records.QueueInfo rootQueue =
+ applicationsManager.getQueueInfo(
+ getQueueInfoRequest(ROOT, false, true, true)).getQueueInfo();
+ getChildQueues(rootQueue, queues);
+
+ return TypeConverter.fromYarnQueueInfo(queues);
+ }
+
+
+ public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
+ List<org.apache.hadoop.yarn.api.records.QueueInfo> queues =
+ new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
+
+ org.apache.hadoop.yarn.api.records.QueueInfo rootQueue =
+ applicationsManager.getQueueInfo(
+ getQueueInfoRequest(ROOT, false, true, false)).getQueueInfo();
+ getChildQueues(rootQueue, queues);
+
+ return TypeConverter.fromYarnQueueInfo(queues);
+ }
+
+ public QueueInfo[] getChildQueues(String parent) throws IOException,
+ InterruptedException {
+ List<org.apache.hadoop.yarn.api.records.QueueInfo> queues =
+ new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
+
+ org.apache.hadoop.yarn.api.records.QueueInfo parentQueue =
+ applicationsManager.getQueueInfo(
+ getQueueInfoRequest(parent, false, true, false)).getQueueInfo();
+ getChildQueues(parentQueue, queues);
+
+ return TypeConverter.fromYarnQueueInfo(queues);
+ }
+
+ public String getStagingAreaDir() throws IOException, InterruptedException {
+ String user =
+ UserGroupInformation.getCurrentUser().getShortUserName();
+ Path path = MRApps.getStagingAreaDir(conf, user);
+ LOG.info("DEBUG --- getStagingAreaDir: dir=" + path);
+ return path.toString();
+ }
+
+
+ public String getSystemDir() throws IOException, InterruptedException {
+ Path sysDir = new Path(MRConstants.JOB_SUBMIT_DIR);
+ //FileContext.getFileContext(conf).delete(sysDir, true);
+ return sysDir.toString();
+ }
+
+
+ public long getTaskTrackerExpiryInterval() throws IOException,
+ InterruptedException {
+ return 0;
+ }
+
+ public void setJobPriority(JobID arg0, String arg1) throws IOException,
+ InterruptedException {
+ return;
+ }
+
+
+ public long getProtocolVersion(String arg0, long arg1) throws IOException {
+ return 0;
+ }
+
+ public long renewDelegationToken(Token<DelegationTokenIdentifier> arg0)
+ throws IOException, InterruptedException {
+ // TODO: Implement renewDelegationToken
+ LOG.warn("renewDelegationToken - Not implemented");
+ return 0;
+ }
+
+
+ public ApplicationId submitApplication(ApplicationSubmissionContext appContext)
+ throws IOException {
+ appContext.setApplicationId(applicationId);
+ SubmitApplicationRequest request = recordFactory.newRecordInstance(SubmitApplicationRequest.class);
+ request.setApplicationSubmissionContext(appContext);
+ applicationsManager.submitApplication(request);
+ LOG.info("Submitted application " + applicationId + " to ResourceManager");
+ return applicationId;
+ }
+
+ public void killApplication(ApplicationId applicationId) throws IOException {
+ FinishApplicationRequest request = recordFactory.newRecordInstance(FinishApplicationRequest.class);
+ request.setApplicationId(applicationId);
+ applicationsManager.finishApplication(request);
+ LOG.info("Killing application " + applicationId);
+ }
+
+
+ public ApplicationReport getApplicationReport(ApplicationId appId)
+ throws YarnRemoteException {
+ GetApplicationReportRequest request = recordFactory
+ .newRecordInstance(GetApplicationReportRequest.class);
+ request.setApplicationId(appId);
+ GetApplicationReportResponse response = applicationsManager
+ .getApplicationReport(request);
+ ApplicationReport applicationReport = response.getApplicationReport();
+ return applicationReport;
+ }
+
+ public ApplicationId getApplicationId() {
+ return applicationId;
+ }
+}
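ResourceMgrDelegate bundles the client-side RM lifecycle in one place. A sketch of the expected calling sequence, where appContext is an ApplicationSubmissionContext assumed to be built by the caller (YARNRunner below does exactly that):

  ResourceMgrDelegate rm = new ResourceMgrDelegate(new YarnConfiguration());
  JobID jobId = rm.getNewJobID();           // caches the new ApplicationId internally
  ApplicationId appId = rm.submitApplication(appContext);  // stamps the cached id
  ApplicationReport report = rm.getApplicationReport(appId);
  System.out.println("Application " + appId + " is " + report.getState());
  rm.killApplication(appId);                // FinishApplicationRequest underneath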
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
new file mode 100644
index 0000000..d8a7e4d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -0,0 +1,586 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.net.URI;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Vector;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.JobSubmissionFiles;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.QueueAclsInfo;
+import org.apache.hadoop.mapreduce.QueueInfo;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskCompletionEvent;
+import org.apache.hadoop.mapreduce.TaskReport;
+import org.apache.hadoop.mapreduce.TaskTrackerInfo;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
+import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.ClientConstants;
+import org.apache.hadoop.mapreduce.v2.MRConstants;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+
+/**
+ * This class enables the current JobClient (0.22 hadoop) to run on YARN.
+ */
+public class YARNRunner implements ClientProtocol {
+
+ private static final Log LOG = LogFactory.getLog(YARNRunner.class);
+
+ public static final String YARN_AM_VMEM_MB =
+ "yarn.am.mapreduce.resource.mb";
+ private static final int DEFAULT_YARN_AM_VMEM_MB = 2048;
+
+ private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ private ResourceMgrDelegate resMgrDelegate;
+ private ClientServiceDelegate clientServiceDelegate;
+ private YarnConfiguration conf;
+ private final FileContext defaultFileContext;
+
+  /**
+   * Creates a YARNRunner which encapsulates the client-side interface to YARN.
+   * @param conf the configuration object for the client
+   */
+ public YARNRunner(Configuration conf) {
+ this.conf = new YarnConfiguration(conf);
+ try {
+ this.resMgrDelegate = new ResourceMgrDelegate(this.conf);
+ this.clientServiceDelegate = new ClientServiceDelegate(this.conf,
+ resMgrDelegate);
+ this.defaultFileContext = FileContext.getFileContext(this.conf);
+ } catch (UnsupportedFileSystemException ufe) {
+ throw new RuntimeException("Error in instantiating YarnClient", ufe);
+ }
+ }
+
+ @Override
+ public void cancelDelegationToken(Token<DelegationTokenIdentifier> arg0)
+ throws IOException, InterruptedException {
+ resMgrDelegate.cancelDelegationToken(arg0);
+ }
+
+ @Override
+ public TaskTrackerInfo[] getActiveTrackers() throws IOException,
+ InterruptedException {
+ return resMgrDelegate.getActiveTrackers();
+ }
+
+ @Override
+ public JobStatus[] getAllJobs() throws IOException, InterruptedException {
+ return resMgrDelegate.getAllJobs();
+ }
+
+ @Override
+ public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
+ InterruptedException {
+ return resMgrDelegate.getBlacklistedTrackers();
+ }
+
+ @Override
+ public ClusterMetrics getClusterMetrics() throws IOException,
+ InterruptedException {
+ return resMgrDelegate.getClusterMetrics();
+ }
+
+ @Override
+ public Token<DelegationTokenIdentifier> getDelegationToken(Text arg0)
+ throws IOException, InterruptedException {
+ return resMgrDelegate.getDelegationToken(arg0);
+ }
+
+ @Override
+ public String getFilesystemName() throws IOException, InterruptedException {
+ return resMgrDelegate.getFilesystemName();
+ }
+
+ @Override
+ public JobID getNewJobID() throws IOException, InterruptedException {
+ return resMgrDelegate.getNewJobID();
+ }
+
+ @Override
+ public QueueInfo getQueue(String queueName) throws IOException,
+ InterruptedException {
+ return resMgrDelegate.getQueue(queueName);
+ }
+
+ @Override
+ public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException,
+ InterruptedException {
+ return resMgrDelegate.getQueueAclsForCurrentUser();
+ }
+
+ @Override
+ public QueueInfo[] getQueues() throws IOException, InterruptedException {
+ return resMgrDelegate.getQueues();
+ }
+
+ @Override
+ public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
+ return resMgrDelegate.getRootQueues();
+ }
+
+ @Override
+ public QueueInfo[] getChildQueues(String parent) throws IOException,
+ InterruptedException {
+ return resMgrDelegate.getChildQueues(parent);
+ }
+
+ @Override
+ public String getStagingAreaDir() throws IOException, InterruptedException {
+ return resMgrDelegate.getStagingAreaDir();
+ }
+
+ @Override
+ public String getSystemDir() throws IOException, InterruptedException {
+ return resMgrDelegate.getSystemDir();
+ }
+
+ @Override
+ public long getTaskTrackerExpiryInterval() throws IOException,
+ InterruptedException {
+ return resMgrDelegate.getTaskTrackerExpiryInterval();
+ }
+
+ @Override
+ public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
+ throws IOException, InterruptedException {
+
+    // TODO: upload the tokens file only when security is enabled.
+ Path applicationTokensFile =
+ new Path(jobSubmitDir, MRConstants.APPLICATION_TOKENS_FILE);
+ try {
+ ts.writeTokenStorageFile(applicationTokensFile, conf);
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+
+ // XXX Remove
+ Path submitJobDir = new Path(jobSubmitDir);
+ FileContext defaultFS = FileContext.getFileContext(conf);
+ Path submitJobFile =
+ defaultFS.makeQualified(JobSubmissionFiles.getJobConfPath(submitJobDir));
+ FSDataInputStream in = defaultFS.open(submitJobFile);
+ conf.addResource(in);
+ // ---
+
+ // Construct necessary information to start the MR AM
+ ApplicationSubmissionContext appContext =
+ createApplicationSubmissionContext(conf, jobSubmitDir, ts);
+ setupDistributedCache(conf, appContext);
+
+ // XXX Remove
+ in.close();
+ // ---
+
+ // Submit to ResourceManager
+ ApplicationId applicationId = resMgrDelegate.submitApplication(appContext);
+
+ ApplicationReport appMaster = resMgrDelegate
+ .getApplicationReport(applicationId);
+ if (appMaster.getState() == ApplicationState.FAILED
+ || appMaster.getState() == ApplicationState.KILLED) {
+ throw RPCUtil.getRemoteException("failed to run job");
+ }
+ return clientServiceDelegate.getJobStatus(jobId);
+ }
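+
+ // Usage sketch (illustrative, not part of this patch): clients normally
+ // reach submitJob() above through the org.apache.hadoop.mapreduce.Job
+ // facade once the framework is set to YARN, e.g.:
+ //
+ // Configuration conf = new Configuration();
+ // conf.set(MRConfig.FRAMEWORK_NAME, "yarn");
+ // Job job = Job.getInstance(conf, "sample-job"); // job name is hypothetical
+ // job.setJarByClass(SampleDriver.class); // SampleDriver is hypothetical
+ // boolean ok = job.waitForCompletion(true); // lands in submitJob() above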
+
+ private LocalResource createApplicationResource(FileContext fs, Path p)
+ throws IOException {
+ LocalResource rsrc = recordFactory.newRecordInstance(LocalResource.class);
+ FileStatus rsrcStat = fs.getFileStatus(p);
+ rsrc.setResource(ConverterUtils.getYarnUrlFromPath(fs
+ .getDefaultFileSystem().resolvePath(rsrcStat.getPath())));
+ rsrc.setSize(rsrcStat.getLen());
+ rsrc.setTimestamp(rsrcStat.getModificationTime());
+ rsrc.setType(LocalResourceType.FILE);
+ rsrc.setVisibility(LocalResourceVisibility.APPLICATION);
+ return rsrc;
+ }
+
+ private ApplicationSubmissionContext createApplicationSubmissionContext(
+ Configuration jobConf,
+ String jobSubmitDir, Credentials ts) throws IOException {
+ ApplicationSubmissionContext appContext =
+ recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
+ ApplicationId applicationId = resMgrDelegate.getApplicationId();
+ appContext.setApplicationId(applicationId);
+ Resource capability = recordFactory.newRecordInstance(Resource.class);
+ capability.setMemory(conf.getInt(YARN_AM_VMEM_MB, DEFAULT_YARN_AM_VMEM_MB));
+ LOG.info("AppMaster capability = " + capability);
+ appContext.setMasterCapability(capability);
+
+ Path jobConfPath = new Path(jobSubmitDir, MRConstants.JOB_CONF_FILE);
+
+ URL yarnUrlForJobSubmitDir = ConverterUtils
+ .getYarnUrlFromPath(defaultFileContext.getDefaultFileSystem()
+ .resolvePath(
+ defaultFileContext.makeQualified(new Path(jobSubmitDir))));
+ LOG.debug("Creating setup context, jobSubmitDir url is "
+ + yarnUrlForJobSubmitDir);
+
+ appContext.setResource(MRConstants.JOB_SUBMIT_DIR,
+ yarnUrlForJobSubmitDir);
+
+ appContext.setResourceTodo(MRConstants.JOB_CONF_FILE,
+ createApplicationResource(defaultFileContext,
+ jobConfPath));
+ if (jobConf.get(MRJobConfig.JAR) != null) {
+ appContext.setResourceTodo(MRConstants.JOB_JAR,
+ createApplicationResource(defaultFileContext,
+ new Path(jobSubmitDir, MRConstants.JOB_JAR)));
+ } else {
+ // The job jar may be null; e.g. for pipes the job jar is the Hadoop
+ // MapReduce jar itself, which is already on the classpath.
+ LOG.info("Job jar is not present. "
+ + "Not adding any jar to the list of resources.");
+ }
+
+ // TODO gross hack
+ for (String s : new String[] { "job.split", "job.splitmetainfo",
+ MRConstants.APPLICATION_TOKENS_FILE }) {
+ appContext.setResourceTodo(
+ MRConstants.JOB_SUBMIT_DIR + "/" + s,
+ createApplicationResource(defaultFileContext, new Path(jobSubmitDir, s)));
+ }
+
+ // TODO: Only if security is on.
+ List<String> fsTokens = new ArrayList<String>();
+ for (Token<? extends TokenIdentifier> token : ts.getAllTokens()) {
+ fsTokens.add(token.encodeToUrlString());
+ }
+
+ // TODO - Remove this!
+ appContext.addAllFsTokens(fsTokens);
+ DataOutputBuffer dob = new DataOutputBuffer();
+ ts.writeTokenStorageToStream(dob);
+ appContext.setFsTokensTodo(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
+
+ // Add queue information
+ appContext.setQueue(jobConf.get(JobContext.QUEUE_NAME, JobConf.DEFAULT_QUEUE_NAME));
+
+ // Add job name
+ appContext.setApplicationName(jobConf.get(JobContext.JOB_NAME, "N/A"));
+
+ // Add the command line
+ String javaHome = "$JAVA_HOME";
+ Vector<CharSequence> vargs = new Vector<CharSequence>(8);
+ vargs.add(javaHome + "/bin/java");
+ vargs.add("-Dhadoop.root.logger="
+ + conf.get(ClientConstants.MR_APPMASTER_LOG_OPTS,
+ ClientConstants.DEFAULT_MR_APPMASTER_LOG_OPTS) + ",console");
+
+ vargs.add(conf.get(ClientConstants.MR_APPMASTER_COMMAND_OPTS,
+ ClientConstants.DEFAULT_MR_APPMASTER_COMMAND_OPTS));
+
+ // Add { job jar, MR app jar } to classpath.
+ Map<String, String> environment = new HashMap<String, String>();
+// appContext.environment = new HashMap<CharSequence, CharSequence>();
+ MRApps.setInitialClasspath(environment);
+ MRApps.addToClassPath(environment, MRConstants.JOB_JAR);
+ MRApps.addToClassPath(environment,
+ MRConstants.YARN_MAPREDUCE_APP_JAR_PATH);
+ appContext.addAllEnvironment(environment);
+ vargs.add("org.apache.hadoop.mapreduce.v2.app.MRAppMaster");
+ vargs.add(String.valueOf(applicationId.getClusterTimestamp()));
+ vargs.add(String.valueOf(applicationId.getId()));
+ vargs.add(ApplicationConstants.AM_FAIL_COUNT_STRING);
+ vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
+ vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
+
+ Vector<String> vargsFinal = new Vector<String>(8);
+ // Final command
+ StringBuilder mergedCommand = new StringBuilder();
+ for (CharSequence str : vargs) {
+ mergedCommand.append(str).append(" ");
+ }
+ vargsFinal.add(mergedCommand.toString());
+
+ LOG.info("Command to launch container for ApplicationMaster is : "
+ + mergedCommand);
+
+ appContext.addAllCommands(vargsFinal);
+ // TODO: RM should get this from RPC.
+ appContext.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
+ return appContext;
+ }
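+
+ // For reference, the merged command assembled above has roughly this shape
+ // (values shown are the defaults from ClientConstants; illustrative only):
+ //
+ // $JAVA_HOME/bin/java -Dhadoop.root.logger=INFO,console -Xmx1536m
+ // org.apache.hadoop.mapreduce.v2.app.MRAppMaster
+ // <clusterTimestamp> <appId> <failCount>
+ // 1><LOG_DIR>/stdout 2><LOG_DIR>/stderr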
+
+ /**
+ * TODO: Copied for now from TaskAttemptImpl.java ... fixme
+ * @param strs timestamps encoded as decimal strings, may be null
+ * @return the parsed timestamps as longs, or null if strs is null
+ */
+ private static long[] parseTimeStamps(String[] strs) {
+ if (null == strs) {
+ return null;
+ }
+ long[] result = new long[strs.length];
+ for(int i=0; i < strs.length; ++i) {
+ result[i] = Long.parseLong(strs[i]);
+ }
+ return result;
+ }
+
+ /**
+ * TODO: Copied for now from TaskAttemptImpl.java ... fixme
+ *
+ * TODO: This is currently needed in YarnRunner as user code like setupJob,
+ * cleanupJob may need access to the dist-cache. Once we separate the
+ * dist-cache for maps, reduces, setup etc., this can include only a subset
+ * of artifacts. This is also needed for the uber-AM case where we run
+ * everything inside the AM.
+ */
+ private void setupDistributedCache(Configuration conf,
+ ApplicationSubmissionContext container) throws IOException {
+
+ // Cache archives
+ parseDistributedCacheArtifacts(conf, container, LocalResourceType.ARCHIVE,
+ DistributedCache.getCacheArchives(conf),
+ parseTimeStamps(DistributedCache.getArchiveTimestamps(conf)),
+ getFileSizes(conf, MRJobConfig.CACHE_ARCHIVES_SIZES),
+ DistributedCache.getArchiveVisibilities(conf),
+ DistributedCache.getArchiveClassPaths(conf));
+
+ // Cache files
+ parseDistributedCacheArtifacts(conf, container, LocalResourceType.FILE,
+ DistributedCache.getCacheFiles(conf),
+ parseTimeStamps(DistributedCache.getFileTimestamps(conf)),
+ getFileSizes(conf, MRJobConfig.CACHE_FILES_SIZES),
+ DistributedCache.getFileVisibilities(conf),
+ DistributedCache.getFileClassPaths(conf));
+ }
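+
+ // Illustrative example (not part of this patch): the archive/file lists
+ // parsed above come from the standard distributed-cache settings, e.g.
+ //
+ // DistributedCache.addCacheFile(
+ // new URI("hdfs:///libs/lookup.dat#lookup"), conf); // path is hypothetical
+ // DistributedCache.addCacheArchive(
+ // new URI("hdfs:///libs/deps.zip"), conf); // path is hypothetical
+ //
+ // A URI fragment such as "#lookup" becomes the link name that
+ // parseDistributedCacheArtifacts() below localizes the resource under.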
+
+ // TODO - Move this to MR!
+ // Use TaskDistributedCacheManager.CacheFiles.makeCacheFiles(URI[], long[], boolean[], Path[], FileType)
+ private void parseDistributedCacheArtifacts(Configuration conf,
+ ApplicationSubmissionContext container, LocalResourceType type,
+ URI[] uris, long[] timestamps, long[] sizes, boolean visibilities[],
+ Path[] pathsToPutOnClasspath) throws IOException {
+
+ if (uris != null) {
+ // Sanity check
+ if ((uris.length != timestamps.length) || (uris.length != sizes.length) ||
+ (uris.length != visibilities.length)) {
+ throw new IllegalArgumentException("Invalid specification for " +
+ "distributed-cache artifacts of type " + type + " :" +
+ " #uris=" + uris.length +
+ " #timestamps=" + timestamps.length +
+ " #sizes=" + sizes.length +
+ " #visibilities=" + visibilities.length
+ );
+ }
+
+ Map<String, Path> classPaths = new HashMap<String, Path>();
+ if (pathsToPutOnClasspath != null) {
+ for (Path p : pathsToPutOnClasspath) {
+ FileSystem fs = p.getFileSystem(conf);
+ p = p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
+ classPaths.put(p.toUri().getPath().toString(), p);
+ }
+ }
+ for (int i = 0; i < uris.length; ++i) {
+ URI u = uris[i];
+ Path p = new Path(u);
+ FileSystem fs = p.getFileSystem(conf);
+ p = fs.resolvePath(
+ p.makeQualified(fs.getUri(), fs.getWorkingDirectory()));
+ // Add URI fragment or just the filename
+ Path name = new Path((null == u.getFragment())
+ ? p.getName()
+ : u.getFragment());
+ if (name.isAbsolute()) {
+ throw new IllegalArgumentException("Resource name must be relative");
+ }
+ String linkName = name.toUri().getPath();
+ container.setResourceTodo(
+ linkName,
+ createLocalResource(
+ p.toUri(), type,
+ visibilities[i]
+ ? LocalResourceVisibility.PUBLIC
+ : LocalResourceVisibility.PRIVATE,
+ sizes[i], timestamps[i])
+ );
+ if (classPaths.containsKey(u.getPath())) {
+ Map<String, String> environment = container.getAllEnvironment();
+ MRApps.addToClassPath(environment, linkName);
+ }
+ }
+ }
+ }
+
+ // TODO - Move this to MR!
+ private static long[] getFileSizes(Configuration conf, String key) {
+ String[] strs = conf.getStrings(key);
+ if (strs == null) {
+ return null;
+ }
+ long[] result = new long[strs.length];
+ for(int i=0; i < strs.length; ++i) {
+ result[i] = Long.parseLong(strs[i]);
+ }
+ return result;
+ }
+
+ private LocalResource createLocalResource(URI uri,
+ LocalResourceType type, LocalResourceVisibility visibility,
+ long size, long timestamp) throws IOException {
+ LocalResource resource = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(LocalResource.class);
+ resource.setResource(ConverterUtils.getYarnUrlFromURI(uri));
+ resource.setType(type);
+ resource.setVisibility(visibility);
+ resource.setSize(size);
+ resource.setTimestamp(timestamp);
+ return resource;
+ }
+
+ @Override
+ public void setJobPriority(JobID arg0, String arg1) throws IOException,
+ InterruptedException {
+ resMgrDelegate.setJobPriority(arg0, arg1);
+ }
+
+ @Override
+ public long getProtocolVersion(String arg0, long arg1) throws IOException {
+ return resMgrDelegate.getProtocolVersion(arg0, arg1);
+ }
+
+ @Override
+ public long renewDelegationToken(Token<DelegationTokenIdentifier> arg0)
+ throws IOException, InterruptedException {
+ return resMgrDelegate.renewDelegationToken(arg0);
+ }
+
+
+ @Override
+ public Counters getJobCounters(JobID arg0) throws IOException,
+ InterruptedException {
+ return clientServiceDelegate.getJobCounters(arg0);
+ }
+
+ @Override
+ public String getJobHistoryDir() throws IOException, InterruptedException {
+ return clientServiceDelegate.getJobHistoryDir();
+ }
+
+ @Override
+ public JobStatus getJobStatus(JobID jobID) throws IOException,
+ InterruptedException {
+ JobStatus status = clientServiceDelegate.getJobStatus(jobID);
+ return status;
+ }
+
+ @Override
+ public TaskCompletionEvent[] getTaskCompletionEvents(JobID arg0, int arg1,
+ int arg2) throws IOException, InterruptedException {
+ return clientServiceDelegate.getTaskCompletionEvents(arg0, arg1, arg2);
+ }
+
+ @Override
+ public String[] getTaskDiagnostics(TaskAttemptID arg0) throws IOException,
+ InterruptedException {
+ return clientServiceDelegate.getTaskDiagnostics(arg0);
+ }
+
+ @Override
+ public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
+ throws IOException, InterruptedException {
+ return clientServiceDelegate
+ .getTaskReports(jobID, taskType);
+ }
+
+ @Override
+ public void killJob(JobID arg0) throws IOException, InterruptedException {
+ if (!clientServiceDelegate.killJob(arg0)) {
+ resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
+ }
+ }
+
+ @Override
+ public boolean killTask(TaskAttemptID arg0, boolean arg1) throws IOException,
+ InterruptedException {
+ return clientServiceDelegate.killTask(arg0, arg1);
+ }
+
+ @Override
+ public AccessControlList getQueueAdmins(String arg0) throws IOException {
+ return new AccessControlList("*");
+ }
+
+ @Override
+ public JobTrackerStatus getJobTrackerStatus() throws IOException,
+ InterruptedException {
+ return JobTrackerStatus.RUNNING;
+ }
+
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ return ProtocolSignature.getProtocolSignature(this, protocol, clientVersion,
+ clientMethodsHash);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YarnClientProtocolProvider.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YarnClientProtocolProvider.java
new file mode 100644
index 0000000..431b49f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YarnClientProtocolProvider.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
+
+public class YarnClientProtocolProvider extends ClientProtocolProvider {
+
+ @Override
+ public ClientProtocol create(Configuration conf) throws IOException {
+ if ("yarn".equals(conf.get(MRConfig.FRAMEWORK_NAME))) {
+ return new YARNRunner(conf);
+ }
+ return null;
+ }
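+
+ // Discovery note (illustrative): org.apache.hadoop.mapreduce.Cluster loads
+ // ClientProtocolProvider implementations via a ServiceLoader over
+ // META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
+ // (registered below in this patch); this provider only engages when
+ // mapreduce.framework.name is set to "yarn".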
+
+ @Override
+ public ClientProtocol create(InetSocketAddress addr, Configuration conf)
+ throws IOException {
+ return create(conf);
+ }
+
+ @Override
+ public void close(ClientProtocol clientProtocol) throws IOException {
+ // nothing to do
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapreduce/v2/ClientConstants.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapreduce/v2/ClientConstants.java
new file mode 100644
index 0000000..7cab156
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapreduce/v2/ClientConstants.java
@@ -0,0 +1,31 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2;
+
+public interface ClientConstants {
+
+ public static final String MR_APPMASTER_COMMAND_OPTS =
+ "yarn.appMaster.commandOpts";
+
+ public static final String DEFAULT_MR_APPMASTER_COMMAND_OPTS = "-Xmx1536m";
+
+ public static final String MR_APPMASTER_LOG_OPTS = "yarn.appMaster.logOpts";
+
+ public static final String DEFAULT_MR_APPMASTER_LOG_OPTS = "INFO";
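+
+ // Example (illustrative): overriding the AM heap and log level for a job:
+ // conf.set(MR_APPMASTER_COMMAND_OPTS, "-Xmx2048m");
+ // conf.set(MR_APPMASTER_LOG_OPTS, "DEBUG");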
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
new file mode 100644
index 0000000..b4fd930
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
@@ -0,0 +1,14 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+org.apache.hadoop.mapred.YarnClientProtocolProvider
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailMapper.java
new file mode 100644
index 0000000..00df10a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailMapper.java
@@ -0,0 +1,23 @@
+package org.apache.hadoop;
+
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.MapReduceBase;
+import org.apache.hadoop.mapred.Mapper;
+import org.apache.hadoop.mapred.OutputCollector;
+import org.apache.hadoop.mapred.Reporter;
+
+// Mapper that fails
+public class FailMapper extends MapReduceBase implements
+ Mapper<WritableComparable, Writable, WritableComparable, Writable> {
+
+ public void map(WritableComparable key, Writable value,
+ OutputCollector<WritableComparable, Writable> out, Reporter reporter)
+ throws IOException {
+ // NOTE: the next line is required for the TestDebugScript test to succeed
+ System.err.println("failing map");
+ throw new RuntimeException("failing map");
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java
new file mode 100644
index 0000000..e9502b1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java
@@ -0,0 +1,44 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop;
+
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Mapper;
+
+/**
+ * A mapper that always fails: the first attempt throws an IOException,
+ * while every later attempt exits the JVM via System.exit(-1).
+ */
+public class FailingMapper extends Mapper<Text, Text, Text, Text> {
+ public void map(Text key, Text value,
+ Context context) throws IOException,InterruptedException {
+ if (context.getTaskAttemptID().getId() == 0) {
+ System.out.println("Attempt:" + context.getTaskAttemptID() +
+ " Failing mapper throwing exception");
+ throw new IOException("Attempt:" + context.getTaskAttemptID() +
+ " Failing mapper throwing exception");
+ } else {
+ System.out.println("Attempt:" + context.getTaskAttemptID() +
+ " Exiting");
+ System.exit(-1);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/RandomTextWriterJob.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/RandomTextWriterJob.java
new file mode 100644
index 0000000..8d357cb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/RandomTextWriterJob.java
@@ -0,0 +1,758 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+public class RandomTextWriterJob extends Configured implements Tool {
+
+ public static final String TOTAL_BYTES =
+ "mapreduce.randomtextwriter.totalbytes";
+ public static final String BYTES_PER_MAP =
+ "mapreduce.randomtextwriter.bytespermap";
+ public static final String MAX_VALUE = "mapreduce.randomtextwriter.maxwordsvalue";
+ public static final String MIN_VALUE = "mapreduce.randomtextwriter.minwordsvalue";
+ public static final String MIN_KEY = "mapreduce.randomtextwriter.minwordskey";
+ public static final String MAX_KEY = "mapreduce.randomtextwriter.maxwordskey";
+
+ static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+
+ public Job createJob(Configuration conf) throws IOException {
+ long numBytesToWritePerMap = conf.getLong(BYTES_PER_MAP, 10 * 1024);
+ long totalBytesToWrite = conf.getLong(TOTAL_BYTES, numBytesToWritePerMap);
+ int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap);
+ if (numMaps == 0 && totalBytesToWrite > 0) {
+ numMaps = 1;
+ conf.setLong(BYTES_PER_MAP, totalBytesToWrite);
+ }
+ conf.setInt(MRJobConfig.NUM_MAPS, numMaps);
+
+ Job job = new Job(conf);
+
+ job.setJarByClass(RandomTextWriterJob.class);
+ job.setJobName("random-text-writer");
+
+ job.setOutputKeyClass(Text.class);
+ job.setOutputValueClass(Text.class);
+
+ job.setInputFormatClass(RandomInputFormat.class);
+ job.setMapperClass(RandomTextMapper.class);
+
+ job.setOutputFormatClass(SequenceFileOutputFormat.class);
+ //FileOutputFormat.setOutputPath(job, new Path("random-output"));
+ job.setNumReduceTasks(0);
+ return job;
+ }
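+
+ // Sizing note (illustrative): numMaps = TOTAL_BYTES / BYTES_PER_MAP, so
+ // -D mapreduce.randomtextwriter.totalbytes=1048576 with the default
+ // 10240 bytes per map yields 102 map tasks; a remainder smaller than one
+ // map's quota is dropped unless the total would otherwise round to zero.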
+
+ public static class RandomInputFormat extends InputFormat<Text, Text> {
+
+ /**
+ * Generate the requested number of file splits; each split points at a
+ * dummy file under the job's output directory.
+ */
+ public List<InputSplit> getSplits(JobContext job) throws IOException {
+ List<InputSplit> result = new ArrayList<InputSplit>();
+ Path outDir = FileOutputFormat.getOutputPath(job);
+ int numSplits =
+ job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
+ for(int i=0; i < numSplits; ++i) {
+ result.add(new FileSplit(new Path(outDir, "dummy-split-" + i), 0, 1,
+ (String[])null));
+ }
+ return result;
+ }
+
+ /**
+ * Return a single record (filename, "") where the filename is taken from
+ * the file split.
+ */
+ public static class RandomRecordReader extends RecordReader<Text, Text> {
+ Path name;
+ Text key = null;
+ Text value = new Text();
+ public RandomRecordReader(Path p) {
+ name = p;
+ }
+
+ public void initialize(InputSplit split,
+ TaskAttemptContext context)
+ throws IOException, InterruptedException {
+
+ }
+
+ public boolean nextKeyValue() {
+ if (name != null) {
+ key = new Text();
+ key.set(name.getName());
+ name = null;
+ return true;
+ }
+ return false;
+ }
+
+ public Text getCurrentKey() {
+ return key;
+ }
+
+ public Text getCurrentValue() {
+ return value;
+ }
+
+ public void close() {}
+
+ public float getProgress() {
+ return 0.0f;
+ }
+ }
+
+ public RecordReader<Text, Text> createRecordReader(InputSplit split,
+ TaskAttemptContext context) throws IOException, InterruptedException {
+ return new RandomRecordReader(((FileSplit) split).getPath());
+ }
+ }
+
+ public static class RandomTextMapper extends Mapper<Text, Text, Text, Text> {
+
+ private long numBytesToWrite;
+ private int minWordsInKey;
+ private int wordsInKeyRange;
+ private int minWordsInValue;
+ private int wordsInValueRange;
+ private Random random = new Random();
+
+ /**
+ * Save the configuration values that we need to write the data.
+ */
+ public void setup(Context context) {
+ Configuration conf = context.getConfiguration();
+ numBytesToWrite = conf.getLong(BYTES_PER_MAP,
+ 1*1024*1024*1024);
+ minWordsInKey = conf.getInt(MIN_KEY, 5);
+ wordsInKeyRange = (conf.getInt(MAX_KEY, 10) - minWordsInKey);
+ minWordsInValue = conf.getInt(MIN_VALUE, 10);
+ wordsInValueRange = (conf.getInt(MAX_VALUE, 100) - minWordsInValue);
+ }
+
+ /**
+ * Given an output filename, write a bunch of random records to it.
+ */
+ public void map(Text key, Text value,
+ Context context) throws IOException,InterruptedException {
+ int itemCount = 0;
+ while (numBytesToWrite > 0) {
+ // Generate the key/value
+ int noWordsKey = minWordsInKey +
+ (wordsInKeyRange != 0 ? random.nextInt(wordsInKeyRange) : 0);
+ int noWordsValue = minWordsInValue +
+ (wordsInValueRange != 0 ? random.nextInt(wordsInValueRange) : 0);
+ Text keyWords = generateSentence(noWordsKey);
+ Text valueWords = generateSentence(noWordsValue);
+
+ // Write the sentence
+ context.write(keyWords, valueWords);
+
+ numBytesToWrite -= (keyWords.getLength() + valueWords.getLength());
+
+ // Update counters, progress etc.
+ context.getCounter(Counters.BYTES_WRITTEN).increment(
+ keyWords.getLength() + valueWords.getLength());
+ context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
+ if (++itemCount % 200 == 0) {
+ context.setStatus("wrote record " + itemCount + ". " +
+ numBytesToWrite + " bytes left.");
+ }
+ }
+ context.setStatus("done with " + itemCount + " records.");
+ }
+
+ private Text generateSentence(int noWords) {
+ StringBuilder sentence = new StringBuilder();
+ String space = " ";
+ for (int i=0; i < noWords; ++i) {
+ sentence.append(words[random.nextInt(words.length)]);
+ sentence.append(space);
+ }
+ return new Text(sentence.toString());
+ }
+
+ private static String[] words = {
+ "diurnalness", "Homoiousian",
+ "spiranthic", "tetragynian",
+ "silverhead", "ungreat",
+ "lithograph", "exploiter",
+ "physiologian", "by",
+ "hellbender", "Filipendula",
+ "undeterring", "antiscolic",
+ "pentagamist", "hypoid",
+ "cacuminal", "sertularian",
+ "schoolmasterism", "nonuple",
+ "gallybeggar", "phytonic",
+ "swearingly", "nebular",
+ "Confervales", "thermochemically",
+ "characinoid", "cocksuredom",
+ "fallacious", "feasibleness",
+ "debromination", "playfellowship",
+ "tramplike", "testa",
+ "participatingly", "unaccessible",
+ "bromate", "experientialist",
+ "roughcast", "docimastical",
+ "choralcelo", "blightbird",
+ "peptonate", "sombreroed",
+ "unschematized", "antiabolitionist",
+ "besagne", "mastication",
+ "bromic", "sviatonosite",
+ "cattimandoo", "metaphrastical",
+ "endotheliomyoma", "hysterolysis",
+ "unfulminated", "Hester",
+ "oblongly", "blurredness",
+ "authorling", "chasmy",
+ "Scorpaenidae", "toxihaemia",
+ "Dictograph", "Quakerishly",
+ "deaf", "timbermonger",
+ "strammel", "Thraupidae",
+ "seditious", "plerome",
+ "Arneb", "eristically",
+ "serpentinic", "glaumrie",
+ "socioromantic", "apocalypst",
+ "tartrous", "Bassaris",
+ "angiolymphoma", "horsefly",
+ "kenno", "astronomize",
+ "euphemious", "arsenide",
+ "untongued", "parabolicness",
+ "uvanite", "helpless",
+ "gemmeous", "stormy",
+ "templar", "erythrodextrin",
+ "comism", "interfraternal",
+ "preparative", "parastas",
+ "frontoorbital", "Ophiosaurus",
+ "diopside", "serosanguineous",
+ "ununiformly", "karyological",
+ "collegian", "allotropic",
+ "depravity", "amylogenesis",
+ "reformatory", "epidymides",
+ "pleurotropous", "trillium",
+ "dastardliness", "coadvice",
+ "embryotic", "benthonic",
+ "pomiferous", "figureheadship",
+ "Megaluridae", "Harpa",
+ "frenal", "commotion",
+ "abthainry", "cobeliever",
+ "manilla", "spiciferous",
+ "nativeness", "obispo",
+ "monilioid", "biopsic",
+ "valvula", "enterostomy",
+ "planosubulate", "pterostigma",
+ "lifter", "triradiated",
+ "venialness", "tum",
+ "archistome", "tautness",
+ "unswanlike", "antivenin",
+ "Lentibulariaceae", "Triphora",
+ "angiopathy", "anta",
+ "Dawsonia", "becomma",
+ "Yannigan", "winterproof",
+ "antalgol", "harr",
+ "underogating", "ineunt",
+ "cornberry", "flippantness",
+ "scyphostoma", "approbation",
+ "Ghent", "Macraucheniidae",
+ "scabbiness", "unanatomized",
+ "photoelasticity", "eurythermal",
+ "enation", "prepavement",
+ "flushgate", "subsequentially",
+ "Edo", "antihero",
+ "Isokontae", "unforkedness",
+ "porriginous", "daytime",
+ "nonexecutive", "trisilicic",
+ "morphiomania", "paranephros",
+ "botchedly", "impugnation",
+ "Dodecatheon", "obolus",
+ "unburnt", "provedore",
+ "Aktistetae", "superindifference",
+ "Alethea", "Joachimite",
+ "cyanophilous", "chorograph",
+ "brooky", "figured",
+ "periclitation", "quintette",
+ "hondo", "ornithodelphous",
+ "unefficient", "pondside",
+ "bogydom", "laurinoxylon",
+ "Shiah", "unharmed",
+ "cartful", "noncrystallized",
+ "abusiveness", "cromlech",
+ "japanned", "rizzomed",
+ "underskin", "adscendent",
+ "allectory", "gelatinousness",
+ "volcano", "uncompromisingly",
+ "cubit", "idiotize",
+ "unfurbelowed", "undinted",
+ "magnetooptics", "Savitar",
+ "diwata", "ramosopalmate",
+ "Pishquow", "tomorn",
+ "apopenptic", "Haversian",
+ "Hysterocarpus", "ten",
+ "outhue", "Bertat",
+ "mechanist", "asparaginic",
+ "velaric", "tonsure",
+ "bubble", "Pyrales",
+ "regardful", "glyphography",
+ "calabazilla", "shellworker",
+ "stradametrical", "havoc",
+ "theologicopolitical", "sawdust",
+ "diatomaceous", "jajman",
+ "temporomastoid", "Serrifera",
+ "Ochnaceae", "aspersor",
+ "trailmaking", "Bishareen",
+ "digitule", "octogynous",
+ "epididymitis", "smokefarthings",
+ "bacillite", "overcrown",
+ "mangonism", "sirrah",
+ "undecorated", "psychofugal",
+ "bismuthiferous", "rechar",
+ "Lemuridae", "frameable",
+ "thiodiazole", "Scanic",
+ "sportswomanship", "interruptedness",
+ "admissory", "osteopaedion",
+ "tingly", "tomorrowness",
+ "ethnocracy", "trabecular",
+ "vitally", "fossilism",
+ "adz", "metopon",
+ "prefatorial", "expiscate",
+ "diathermacy", "chronist",
+ "nigh", "generalizable",
+ "hysterogen", "aurothiosulphuric",
+ "whitlowwort", "downthrust",
+ "Protestantize", "monander",
+ "Itea", "chronographic",
+ "silicize", "Dunlop",
+ "eer", "componental",
+ "spot", "pamphlet",
+ "antineuritic", "paradisean",
+ "interruptor", "debellator",
+ "overcultured", "Florissant",
+ "hyocholic", "pneumatotherapy",
+ "tailoress", "rave",
+ "unpeople", "Sebastian",
+ "thermanesthesia", "Coniferae",
+ "swacking", "posterishness",
+ "ethmopalatal", "whittle",
+ "analgize", "scabbardless",
+ "naught", "symbiogenetically",
+ "trip", "parodist",
+ "columniform", "trunnel",
+ "yawler", "goodwill",
+ "pseudohalogen", "swangy",
+ "cervisial", "mediateness",
+ "genii", "imprescribable",
+ "pony", "consumptional",
+ "carposporangial", "poleax",
+ "bestill", "subfebrile",
+ "sapphiric", "arrowworm",
+ "qualminess", "ultraobscure",
+ "thorite", "Fouquieria",
+ "Bermudian", "prescriber",
+ "elemicin", "warlike",
+ "semiangle", "rotular",
+ "misthread", "returnability",
+ "seraphism", "precostal",
+ "quarried", "Babylonism",
+ "sangaree", "seelful",
+ "placatory", "pachydermous",
+ "bozal", "galbulus",
+ "spermaphyte", "cumbrousness",
+ "pope", "signifier",
+ "Endomycetaceae", "shallowish",
+ "sequacity", "periarthritis",
+ "bathysphere", "pentosuria",
+ "Dadaism", "spookdom",
+ "Consolamentum", "afterpressure",
+ "mutter", "louse",
+ "ovoviviparous", "corbel",
+ "metastoma", "biventer",
+ "Hydrangea", "hogmace",
+ "seizing", "nonsuppressed",
+ "oratorize", "uncarefully",
+ "benzothiofuran", "penult",
+ "balanocele", "macropterous",
+ "dishpan", "marten",
+ "absvolt", "jirble",
+ "parmelioid", "airfreighter",
+ "acocotl", "archesporial",
+ "hypoplastral", "preoral",
+ "quailberry", "cinque",
+ "terrestrially", "stroking",
+ "limpet", "moodishness",
+ "canicule", "archididascalian",
+ "pompiloid", "overstaid",
+ "introducer", "Italical",
+ "Christianopaganism", "prescriptible",
+ "subofficer", "danseuse",
+ "cloy", "saguran",
+ "frictionlessly", "deindividualization",
+ "Bulanda", "ventricous",
+ "subfoliar", "basto",
+ "scapuloradial", "suspend",
+ "stiffish", "Sphenodontidae",
+ "eternal", "verbid",
+ "mammonish", "upcushion",
+ "barkometer", "concretion",
+ "preagitate", "incomprehensible",
+ "tristich", "visceral",
+ "hemimelus", "patroller",
+ "stentorophonic", "pinulus",
+ "kerykeion", "brutism",
+ "monstership", "merciful",
+ "overinstruct", "defensibly",
+ "bettermost", "splenauxe",
+ "Mormyrus", "unreprimanded",
+ "taver", "ell",
+ "proacquittal", "infestation",
+ "overwoven", "Lincolnlike",
+ "chacona", "Tamil",
+ "classificational", "lebensraum",
+ "reeveland", "intuition",
+ "Whilkut", "focaloid",
+ "Eleusinian", "micromembrane",
+ "byroad", "nonrepetition",
+ "bacterioblast", "brag",
+ "ribaldrous", "phytoma",
+ "counteralliance", "pelvimetry",
+ "pelf", "relaster",
+ "thermoresistant", "aneurism",
+ "molossic", "euphonym",
+ "upswell", "ladhood",
+ "phallaceous", "inertly",
+ "gunshop", "stereotypography",
+ "laryngic", "refasten",
+ "twinling", "oflete",
+ "hepatorrhaphy", "electrotechnics",
+ "cockal", "guitarist",
+ "topsail", "Cimmerianism",
+ "larklike", "Llandovery",
+ "pyrocatechol", "immatchable",
+ "chooser", "metrocratic",
+ "craglike", "quadrennial",
+ "nonpoisonous", "undercolored",
+ "knob", "ultratense",
+ "balladmonger", "slait",
+ "sialadenitis", "bucketer",
+ "magnificently", "unstipulated",
+ "unscourged", "unsupercilious",
+ "packsack", "pansophism",
+ "soorkee", "percent",
+ "subirrigate", "champer",
+ "metapolitics", "spherulitic",
+ "involatile", "metaphonical",
+ "stachyuraceous", "speckedness",
+ "bespin", "proboscidiform",
+ "gul", "squit",
+ "yeelaman", "peristeropode",
+ "opacousness", "shibuichi",
+ "retinize", "yote",
+ "misexposition", "devilwise",
+ "pumpkinification", "vinny",
+ "bonze", "glossing",
+ "decardinalize", "transcortical",
+ "serphoid", "deepmost",
+ "guanajuatite", "wemless",
+ "arval", "lammy",
+ "Effie", "Saponaria",
+ "tetrahedral", "prolificy",
+ "excerpt", "dunkadoo",
+ "Spencerism", "insatiately",
+ "Gilaki", "oratorship",
+ "arduousness", "unbashfulness",
+ "Pithecolobium", "unisexuality",
+ "veterinarian", "detractive",
+ "liquidity", "acidophile",
+ "proauction", "sural",
+ "totaquina", "Vichyite",
+ "uninhabitedness", "allegedly",
+ "Gothish", "manny",
+ "Inger", "flutist",
+ "ticktick", "Ludgatian",
+ "homotransplant", "orthopedical",
+ "diminutively", "monogoneutic",
+ "Kenipsim", "sarcologist",
+ "drome", "stronghearted",
+ "Fameuse", "Swaziland",
+ "alen", "chilblain",
+ "beatable", "agglomeratic",
+ "constitutor", "tendomucoid",
+ "porencephalous", "arteriasis",
+ "boser", "tantivy",
+ "rede", "lineamental",
+ "uncontradictableness", "homeotypical",
+ "masa", "folious",
+ "dosseret", "neurodegenerative",
+ "subtransverse", "Chiasmodontidae",
+ "palaeotheriodont", "unstressedly",
+ "chalcites", "piquantness",
+ "lampyrine", "Aplacentalia",
+ "projecting", "elastivity",
+ "isopelletierin", "bladderwort",
+ "strander", "almud",
+ "iniquitously", "theologal",
+ "bugre", "chargeably",
+ "imperceptivity", "meriquinoidal",
+ "mesophyte", "divinator",
+ "perfunctory", "counterappellant",
+ "synovial", "charioteer",
+ "crystallographical", "comprovincial",
+ "infrastapedial", "pleasurehood",
+ "inventurous", "ultrasystematic",
+ "subangulated", "supraoesophageal",
+ "Vaishnavism", "transude",
+ "chrysochrous", "ungrave",
+ "reconciliable", "uninterpleaded",
+ "erlking", "wherefrom",
+ "aprosopia", "antiadiaphorist",
+ "metoxazine", "incalculable",
+ "umbellic", "predebit",
+ "foursquare", "unimmortal",
+ "nonmanufacture", "slangy",
+ "predisputant", "familist",
+ "preaffiliate", "friarhood",
+ "corelysis", "zoonitic",
+ "halloo", "paunchy",
+ "neuromimesis", "aconitine",
+ "hackneyed", "unfeeble",
+ "cubby", "autoschediastical",
+ "naprapath", "lyrebird",
+ "inexistency", "leucophoenicite",
+ "ferrogoslarite", "reperuse",
+ "uncombable", "tambo",
+ "propodiale", "diplomatize",
+ "Russifier", "clanned",
+ "corona", "michigan",
+ "nonutilitarian", "transcorporeal",
+ "bought", "Cercosporella",
+ "stapedius", "glandularly",
+ "pictorially", "weism",
+ "disilane", "rainproof",
+ "Caphtor", "scrubbed",
+ "oinomancy", "pseudoxanthine",
+ "nonlustrous", "redesertion",
+ "Oryzorictinae", "gala",
+ "Mycogone", "reappreciate",
+ "cyanoguanidine", "seeingness",
+ "breadwinner", "noreast",
+ "furacious", "epauliere",
+ "omniscribent", "Passiflorales",
+ "uninductive", "inductivity",
+ "Orbitolina", "Semecarpus",
+ "migrainoid", "steprelationship",
+ "phlogisticate", "mesymnion",
+ "sloped", "edificator",
+ "beneficent", "culm",
+ "paleornithology", "unurban",
+ "throbless", "amplexifoliate",
+ "sesquiquintile", "sapience",
+ "astucious", "dithery",
+ "boor", "ambitus",
+ "scotching", "uloid",
+ "uncompromisingness", "hoove",
+ "waird", "marshiness",
+ "Jerusalem", "mericarp",
+ "unevoked", "benzoperoxide",
+ "outguess", "pyxie",
+ "hymnic", "euphemize",
+ "mendacity", "erythremia",
+ "rosaniline", "unchatteled",
+ "lienteria", "Bushongo",
+ "dialoguer", "unrepealably",
+ "rivethead", "antideflation",
+ "vinegarish", "manganosiderite",
+ "doubtingness", "ovopyriform",
+ "Cephalodiscus", "Muscicapa",
+ "Animalivora", "angina",
+ "planispheric", "ipomoein",
+ "cuproiodargyrite", "sandbox",
+ "scrat", "Munnopsidae",
+ "shola", "pentafid",
+ "overstudiousness", "times",
+ "nonprofession", "appetible",
+ "valvulotomy", "goladar",
+ "uniarticular", "oxyterpene",
+ "unlapsing", "omega",
+ "trophonema", "seminonflammable",
+ "circumzenithal", "starer",
+ "depthwise", "liberatress",
+ "unleavened", "unrevolting",
+ "groundneedle", "topline",
+ "wandoo", "umangite",
+ "ordinant", "unachievable",
+ "oversand", "snare",
+ "avengeful", "unexplicit",
+ "mustafina", "sonable",
+ "rehabilitative", "eulogization",
+ "papery", "technopsychology",
+ "impressor", "cresylite",
+ "entame", "transudatory",
+ "scotale", "pachydermatoid",
+ "imaginary", "yeat",
+ "slipped", "stewardship",
+ "adatom", "cockstone",
+ "skyshine", "heavenful",
+ "comparability", "exprobratory",
+ "dermorhynchous", "parquet",
+ "cretaceous", "vesperal",
+ "raphis", "undangered",
+ "Glecoma", "engrain",
+ "counteractively", "Zuludom",
+ "orchiocatabasis", "Auriculariales",
+ "warriorwise", "extraorganismal",
+ "overbuilt", "alveolite",
+ "tetchy", "terrificness",
+ "widdle", "unpremonished",
+ "rebilling", "sequestrum",
+ "equiconvex", "heliocentricism",
+ "catabaptist", "okonite",
+ "propheticism", "helminthagogic",
+ "calycular", "giantly",
+ "wingable", "golem",
+ "unprovided", "commandingness",
+ "greave", "haply",
+ "doina", "depressingly",
+ "subdentate", "impairment",
+ "decidable", "neurotrophic",
+ "unpredict", "bicorporeal",
+ "pendulant", "flatman",
+ "intrabred", "toplike",
+ "Prosobranchiata", "farrantly",
+ "toxoplasmosis", "gorilloid",
+ "dipsomaniacal", "aquiline",
+ "atlantite", "ascitic",
+ "perculsive", "prospectiveness",
+ "saponaceous", "centrifugalization",
+ "dinical", "infravaginal",
+ "beadroll", "affaite",
+ "Helvidian", "tickleproof",
+ "abstractionism", "enhedge",
+ "outwealth", "overcontribute",
+ "coldfinch", "gymnastic",
+ "Pincian", "Munychian",
+ "codisjunct", "quad",
+ "coracomandibular", "phoenicochroite",
+ "amender", "selectivity",
+ "putative", "semantician",
+ "lophotrichic", "Spatangoidea",
+ "saccharogenic", "inferent",
+ "Triconodonta", "arrendation",
+ "sheepskin", "taurocolla",
+ "bunghole", "Machiavel",
+ "triakistetrahedral", "dehairer",
+ "prezygapophysial", "cylindric",
+ "pneumonalgia", "sleigher",
+ "emir", "Socraticism",
+ "licitness", "massedly",
+ "instructiveness", "sturdied",
+ "redecrease", "starosta",
+ "evictor", "orgiastic",
+ "squdge", "meloplasty",
+ "Tsonecan", "repealableness",
+ "swoony", "myesthesia",
+ "molecule", "autobiographist",
+ "reciprocation", "refective",
+ "unobservantness", "tricae",
+ "ungouged", "floatability",
+ "Mesua", "fetlocked",
+ "chordacentrum", "sedentariness",
+ "various", "laubanite",
+ "nectopod", "zenick",
+ "sequentially", "analgic",
+ "biodynamics", "posttraumatic",
+ "nummi", "pyroacetic",
+ "bot", "redescend",
+ "dispermy", "undiffusive",
+ "circular", "trillion",
+ "Uraniidae", "ploration",
+ "discipular", "potentness",
+ "sud", "Hu",
+ "Eryon", "plugger",
+ "subdrainage", "jharal",
+ "abscission", "supermarket",
+ "countergabion", "glacierist",
+ "lithotresis", "minniebush",
+ "zanyism", "eucalypteol",
+ "sterilely", "unrealize",
+ "unpatched", "hypochondriacism",
+ "critically", "cheesecutter",
+ };
+ }
+
+ /**
+ * This is the main routine for launching a distributed random-text write
+ * job. Each map writes the number of bytes configured via
+ * mapreduce.randomtextwriter.bytespermap; the job runs no reduces.
+ *
+ * @throws IOException
+ */
+ public int run(String[] args) throws Exception {
+ if (args.length == 0) {
+ return printUsage();
+ }
+ Job job = createJob(getConf());
+ FileOutputFormat.setOutputPath(job, new Path(args[0]));
+ Date startTime = new Date();
+ System.out.println("Job started: " + startTime);
+ int ret = job.waitForCompletion(true) ? 0 : 1;
+ Date endTime = new Date();
+ System.out.println("Job ended: " + endTime);
+ System.out.println("The job took " +
+ (endTime.getTime() - startTime.getTime()) /1000 +
+ " seconds.");
+
+ return ret;
+ }
+
+ static int printUsage() {
+ System.out.println("randomtextwriter " +
+ "[-outFormat <output format class>] " +
+ "<output>");
+ ToolRunner.printGenericCommandUsage(System.out);
+ return 2;
+ }
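+
+ // Invocation sketch (illustrative; the jar name is hypothetical):
+ //
+ // hadoop jar hadoop-mapreduce-client-jobclient-tests.jar \
+ // org.apache.hadoop.RandomTextWriterJob \
+ // -D mapreduce.randomtextwriter.totalbytes=1073741824 /rtw-output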
+
+ public static void main(String[] args) throws Exception {
+ int res = ToolRunner.run(new Configuration(), new RandomTextWriterJob(),
+ args);
+ System.exit(res);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/SleepJob.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/SleepJob.java
new file mode 100644
index 0000000..b9f4a67
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/SleepJob.java
@@ -0,0 +1,276 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop;
+
+import java.io.IOException;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Partitioner;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Dummy class for testing the MR framework. Sleeps for a defined period
+ * of time in mapper and reducer. Generates fake input for map / reduce
+ * jobs. Note that the generated number of input pairs is on the order
+ * of <code>numMappers * mapSleepTime / 100</code>, so the job uses
+ * some disk space.
+ */
+public class SleepJob extends Configured implements Tool {
+ public static String MAP_SLEEP_COUNT = "mapreduce.sleepjob.map.sleep.count";
+ public static String REDUCE_SLEEP_COUNT =
+ "mapreduce.sleepjob.reduce.sleep.count";
+ public static String MAP_SLEEP_TIME = "mapreduce.sleepjob.map.sleep.time";
+ public static String REDUCE_SLEEP_TIME =
+ "mapreduce.sleepjob.reduce.sleep.time";
+
+ public static class SleepJobPartitioner extends
+ Partitioner<IntWritable, NullWritable> {
+ public int getPartition(IntWritable k, NullWritable v, int numPartitions) {
+ return k.get() % numPartitions;
+ }
+ }
+
+ public static class EmptySplit extends InputSplit implements Writable {
+ public void write(DataOutput out) throws IOException { }
+ public void readFields(DataInput in) throws IOException { }
+ public long getLength() { return 0L; }
+ public String[] getLocations() { return new String[0]; }
+ }
+
+ public static class SleepInputFormat
+ extends InputFormat<IntWritable,IntWritable> {
+
+ public List<InputSplit> getSplits(JobContext jobContext) {
+ List<InputSplit> ret = new ArrayList<InputSplit>();
+ int numSplits = jobContext.getConfiguration().
+ getInt(MRJobConfig.NUM_MAPS, 1);
+ for (int i = 0; i < numSplits; ++i) {
+ ret.add(new EmptySplit());
+ }
+ return ret;
+ }
+
+ public RecordReader<IntWritable,IntWritable> createRecordReader(
+ InputSplit ignored, TaskAttemptContext taskContext)
+ throws IOException {
+ Configuration conf = taskContext.getConfiguration();
+ final int count = conf.getInt(MAP_SLEEP_COUNT, 1);
+ if (count < 0) throw new IOException("Invalid map count: " + count);
+ final int redcount = conf.getInt(REDUCE_SLEEP_COUNT, 1);
+ if (redcount < 0)
+ throw new IOException("Invalid reduce count: " + redcount);
+ final int emitPerMapTask = (redcount * taskContext.getNumReduceTasks());
+
+ return new RecordReader<IntWritable,IntWritable>() {
+ private int records = 0;
+ private int emitCount = 0;
+ private IntWritable key = null;
+ private IntWritable value = null;
+ public void initialize(InputSplit split, TaskAttemptContext context) {
+ }
+
+ public boolean nextKeyValue()
+ throws IOException {
+ if (count == 0) {
+ return false;
+ }
+ key = new IntWritable();
+ key.set(emitCount);
+ int emit = emitPerMapTask / count;
+ if ((emitPerMapTask) % count > records) {
+ ++emit;
+ }
+ emitCount += emit;
+ value = new IntWritable();
+ value.set(emit);
+ return records++ < count;
+ }
+ public IntWritable getCurrentKey() { return key; }
+ public IntWritable getCurrentValue() { return value; }
+ public void close() throws IOException { }
+ public float getProgress() throws IOException {
+ // report progress as a fraction in [0, 1]
+ return count == 0 ? 1.0f : records / ((float)count);
+ }
+ };
+ }
+ }
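+
+ // Worked example (illustrative): with MAP_SLEEP_COUNT=3, REDUCE_SLEEP_COUNT=2
+ // and 3 reduce tasks, emitPerMapTask = 2 * 3 = 6, so the three records of a
+ // map emit 2 values each; SleepMapper below turns those into keys 0..5 and
+ // the modulo partitioner above hands exactly 2 keys to each reducer.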
+
+ public static class SleepMapper
+ extends Mapper<IntWritable, IntWritable, IntWritable, NullWritable> {
+ private long mapSleepDuration = 100;
+ private int mapSleepCount = 1;
+ private int count = 0;
+
+ protected void setup(Context context)
+ throws IOException, InterruptedException {
+ Configuration conf = context.getConfiguration();
+ this.mapSleepCount =
+ conf.getInt(MAP_SLEEP_COUNT, mapSleepCount);
+ this.mapSleepDuration = mapSleepCount == 0 ? 0 :
+ conf.getLong(MAP_SLEEP_TIME , 100) / mapSleepCount;
+ }
+
+ public void map(IntWritable key, IntWritable value, Context context
+ ) throws IOException, InterruptedException {
+ // it is expected that every map processes mapSleepCount records.
+ try {
+ context.setStatus("Sleeping... (" +
+ (mapSleepDuration * (mapSleepCount - count)) + ") ms left");
+ Thread.sleep(mapSleepDuration);
+ }
+ catch (InterruptedException ex) {
+ throw (IOException)new IOException(
+ "Interrupted while sleeping").initCause(ex);
+ }
+ ++count;
+ // output reduceSleepCount * numReduce number of random values, so that
+ // each reducer will get reduceSleepCount number of keys.
+ int k = key.get();
+ for (int i = 0; i < value.get(); ++i) {
+ context.write(new IntWritable(k + i), NullWritable.get());
+ }
+ }
+ }
+
+ public static class SleepReducer
+ extends Reducer<IntWritable, NullWritable, NullWritable, NullWritable> {
+ private long reduceSleepDuration = 100;
+ private int reduceSleepCount = 1;
+ private int count = 0;
+
+ protected void setup(Context context)
+ throws IOException, InterruptedException {
+ Configuration conf = context.getConfiguration();
+ this.reduceSleepCount =
+ conf.getInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
+ this.reduceSleepDuration = reduceSleepCount == 0 ? 0 :
+ conf.getLong(REDUCE_SLEEP_TIME , 100) / reduceSleepCount;
+ }
+
+ public void reduce(IntWritable key, Iterable<NullWritable> values,
+ Context context)
+ throws IOException {
+ try {
+ context.setStatus("Sleeping... (" +
+ (reduceSleepDuration * (reduceSleepCount - count)) + ") ms left");
+ Thread.sleep(reduceSleepDuration);
+
+ }
+ catch (InterruptedException ex) {
+ throw (IOException)new IOException(
+ "Interrupted while sleeping").initCause(ex);
+ }
+ count++;
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ int res = ToolRunner.run(new Configuration(), new SleepJob(), args);
+ System.exit(res);
+ }
+
+ public Job createJob(int numMapper, int numReducer,
+ long mapSleepTime, int mapSleepCount,
+ long reduceSleepTime, int reduceSleepCount)
+ throws IOException {
+ Configuration conf = getConf();
+ conf.setLong(MAP_SLEEP_TIME, mapSleepTime);
+ conf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime);
+ conf.setInt(MAP_SLEEP_COUNT, mapSleepCount);
+ conf.setInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
+ conf.setInt(MRJobConfig.NUM_MAPS, numMapper);
+ Job job = Job.getInstance(conf, "sleep");
+ job.setNumReduceTasks(numReducer);
+ job.setJarByClass(SleepJob.class);
+ job.setMapperClass(SleepMapper.class);
+ job.setMapOutputKeyClass(IntWritable.class);
+ job.setMapOutputValueClass(NullWritable.class);
+ job.setReducerClass(SleepReducer.class);
+ job.setOutputFormatClass(NullOutputFormat.class);
+ job.setInputFormatClass(SleepInputFormat.class);
+ job.setPartitionerClass(SleepJobPartitioner.class);
+ job.setSpeculativeExecution(false);
+ job.setJobName("Sleep job");
+ FileInputFormat.addInputPath(job, new Path("ignored"));
+ return job;
+ }
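+
+ // Usage sketch (illustrative):
+ //
+ // SleepJob sleep = new SleepJob();
+ // sleep.setConf(new Configuration());
+ // Job job = sleep.createJob(4, 2, 1000, 10, 500, 5);
+ // job.waitForCompletion(true);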
+
+ public int run(String[] args) throws Exception {
+
+ if(args.length < 1) {
+ System.err.println("SleepJob [-m numMapper] [-r numReducer]" +
+ " [-mt mapSleepTime (msec)] [-rt reduceSleepTime (msec)]" +
+ " [-recordt recordSleepTime (msec)]");
+ ToolRunner.printGenericCommandUsage(System.err);
+ return 2;
+ }
+
+ int numMapper = 1, numReducer = 1;
+ long mapSleepTime = 100, reduceSleepTime = 100, recSleepTime = 100;
+ int mapSleepCount = 1, reduceSleepCount = 1;
+
+ for(int i=0; i < args.length; i++ ) {
+ if(args[i].equals("-m")) {
+ numMapper = Integer.parseInt(args[++i]);
+ }
+ else if(args[i].equals("-r")) {
+ numReducer = Integer.parseInt(args[++i]);
+ }
+ else if(args[i].equals("-mt")) {
+ mapSleepTime = Long.parseLong(args[++i]);
+ }
+ else if(args[i].equals("-rt")) {
+ reduceSleepTime = Long.parseLong(args[++i]);
+ }
+ else if (args[i].equals("-recordt")) {
+ recSleepTime = Long.parseLong(args[++i]);
+ }
+ }
+
+ // sleep for *SleepTime duration in Task by recSleepTime per record
+ mapSleepCount = (int)Math.ceil(mapSleepTime / ((double)recSleepTime));
+ reduceSleepCount = (int)Math.ceil(reduceSleepTime / ((double)recSleepTime));
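+ // e.g. -mt 10000 -recordt 100 gives mapSleepCount = 100, i.e. one hundred
+ // records of roughly 100 ms of sleep each in every map task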
+ Job job = createJob(numMapper, numReducer, mapSleepTime,
+ mapSleepCount, reduceSleepTime, reduceSleepCount);
+ return job.waitForCompletion(true) ? 0 : 1;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
new file mode 100644
index 0000000..66f8654
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -0,0 +1,445 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.Iterator;
+
+import junit.framework.Assert;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.Cluster;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationStatus;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.factory.providers.YarnRemoteExceptionFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.junit.Test;
+
+public class TestClientRedirect {
+
+ static {
+ DefaultMetricsSystem.setMiniClusterMode(true);
+ }
+
+ private static final Log LOG = LogFactory.getLog(TestClientRedirect.class);
+ private static final String RMADDRESS = "0.0.0.0:8054";
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ private static final String AMHOSTADDRESS = "0.0.0.0:10020";
+ private static final String HSHOSTADDRESS = "0.0.0.0:10021";
+ private volatile boolean amContact = false;
+ private volatile boolean hsContact = false;
+ private volatile boolean amRunning = false;
+
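+ // Flow under test: while the mock AM is up, the client asks the RM for the
+ // application report, is pointed at the AM, and fetches counters from it
+ // (amContact). After the AM stops, the RM reports SUCCEEDED and the same
+ // client is redirected to the history service for the counters (hsContact).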
+ @Test
+ public void testRedirect() throws Exception {
+
+ Configuration conf = new YarnConfiguration();
+ conf.set(MRConfig.FRAMEWORK_NAME, "yarn");
+ conf.set(YarnConfiguration.APPSMANAGER_ADDRESS, RMADDRESS);
+ conf.set(JHConfig.HS_BIND_ADDRESS, HSHOSTADDRESS);
+ RMService rmService = new RMService("test");
+ rmService.init(conf);
+ rmService.start();
+
+ AMService amService = new AMService();
+ amService.init(conf);
+ amService.start(conf);
+ amRunning = true;
+
+ HistoryService historyService = new HistoryService();
+ historyService.init(conf);
+ historyService.start(conf);
+
+ LOG.info("services started");
+ Cluster cluster = new Cluster(conf);
+ org.apache.hadoop.mapreduce.JobID jobID =
+ new org.apache.hadoop.mapred.JobID("201103121733", 1);
+ org.apache.hadoop.mapreduce.Counters counters = cluster.getJob(jobID)
+ .getCounters();
+ Iterator<org.apache.hadoop.mapreduce.CounterGroup> it = counters.iterator();
+ while (it.hasNext()) {
+ org.apache.hadoop.mapreduce.CounterGroup group = it.next();
+ LOG.info("Group " + group.getDisplayName());
+ Iterator<org.apache.hadoop.mapreduce.Counter> itc = group.iterator();
+ while (itc.hasNext()) {
+ LOG.info("Counter is " + itc.next().getDisplayName());
+ }
+ }
+ Assert.assertTrue(amContact);
+
+ LOG.info("Sleeping for 5 seconds before stop for" +
+ " the client socket to not get EOF immediately..");
+ Thread.sleep(5000);
+
+ //bring down the AM service
+ amService.stop();
+ amRunning = false;
+
+ LOG.info("Sleeping for 5 seconds after stop for" +
+ " the server to exit cleanly..");
+ Thread.sleep(5000);
+
+ // Same client
+ counters = cluster.getJob(jobID).getCounters();
+ it = counters.iterator();
+ while (it.hasNext()) {
+ org.apache.hadoop.mapreduce.CounterGroup group = it.next();
+ LOG.info("Group " + group.getDisplayName());
+ Iterator<org.apache.hadoop.mapreduce.Counter> itc = group.iterator();
+ while (itc.hasNext()) {
+ LOG.info("Counter is " + itc.next().getDisplayName());
+ }
+ }
+
+ Assert.assertTrue(hsContact);
+
+ rmService.stop();
+ historyService.stop();
+ }
+
+ class RMService extends AbstractService implements ClientRMProtocol {
+ private String clientServiceBindAddress;
+ InetSocketAddress clientBindAddress;
+ private Server server;
+
+ public RMService(String name) {
+ super(name);
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ clientServiceBindAddress = RMADDRESS;
+ /*
+ clientServiceBindAddress = conf.get(
+ YarnConfiguration.APPSMANAGER_ADDRESS,
+ YarnConfiguration.DEFAULT_APPSMANAGER_BIND_ADDRESS);
+ */
+ clientBindAddress = NetUtils.createSocketAddr(clientServiceBindAddress);
+ super.init(conf);
+ }
+
+ @Override
+ public void start() {
+ // All the clients to appsManager are supposed to be authenticated via
+ // Kerberos if security is enabled, so no secretManager.
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ Configuration clientServerConf = new Configuration(getConfig());
+ this.server = rpc.getServer(ClientRMProtocol.class, this,
+ clientBindAddress, clientServerConf, null, 1);
+ this.server.start();
+ super.start();
+ }
+
+ @Override
+ public GetNewApplicationIdResponse getNewApplicationId(GetNewApplicationIdRequest request) throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetApplicationReportResponse getApplicationReport(
+ GetApplicationReportRequest request) throws YarnRemoteException {
+ ApplicationId applicationId = request.getApplicationId();
+ ApplicationReport application = recordFactory
+ .newRecordInstance(ApplicationReport.class);
+ application.setApplicationId(applicationId);
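+ // Report RUNNING while the mock AM is up so the client talks to the AM;
+ // report SUCCEEDED once it is down so the client falls back to the
+ // history service.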
+ if (amRunning) {
+ application.setState(ApplicationState.RUNNING);
+ } else {
+ application.setState(ApplicationState.SUCCEEDED);
+ }
+ String[] split = AMHOSTADDRESS.split(":");
+ application.setHost(split[0]);
+ application.setRpcPort(Integer.parseInt(split[1]));
+ GetApplicationReportResponse response = recordFactory
+ .newRecordInstance(GetApplicationReportResponse.class);
+ response.setApplicationReport(application);
+ return response;
+ }
+
+ @Override
+ public SubmitApplicationResponse submitApplication(
+ SubmitApplicationRequest request) throws YarnRemoteException {
+ throw YarnRemoteExceptionFactoryProvider.getYarnRemoteExceptionFactory(
+ null).createYarnRemoteException("Test");
+ }
+
+ @Override
+ public FinishApplicationResponse finishApplication(
+ FinishApplicationRequest request) throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetClusterMetricsResponse getClusterMetrics(
+ GetClusterMetricsRequest request) throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetAllApplicationsResponse getAllApplications(
+ GetAllApplicationsRequest request) throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetClusterNodesResponse getClusterNodes(
+ GetClusterNodesRequest request) throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request)
+ throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetQueueUserAclsInfoResponse getQueueUserAcls(
+ GetQueueUserAclsInfoRequest request) throws YarnRemoteException {
+ return null;
+ }
+ }
+
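+ // The mock history service reuses the AM's MRClientProtocol implementation,
+ // differing only in its bind address and in flagging hsContact rather than
+ // amContact when counters are fetched.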
+ class HistoryService extends AMService {
+ public HistoryService() {
+ super(HSHOSTADDRESS);
+ }
+
+ @Override
+ public GetCountersResponse getCounters(GetCountersRequest request) throws YarnRemoteException {
+ hsContact = true;
+ Counters counters = getMyCounters();
+ GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class);
+ response.setCounters(counters);
+ return response;
+ }
+ }
+
+ class AMService extends AbstractService
+ implements MRClientProtocol {
+ private InetSocketAddress bindAddress;
+ private Server server;
+ private final String hostAddress;
+ public AMService() {
+ this(AMHOSTADDRESS);
+ }
+
+ public AMService(String hostAddress) {
+ super("AMService");
+ this.hostAddress = hostAddress;
+ }
+
+ public void start(Configuration conf) {
+ YarnRPC rpc = YarnRPC.create(conf);
+ // TODO: use a fixed port?
+ InetSocketAddress address = NetUtils.createSocketAddr(hostAddress);
+ InetAddress hostNameResolved = null;
+ try {
+ hostNameResolved = InetAddress.getLocalHost();
+ } catch (UnknownHostException e) {
+ throw new YarnException(e);
+ }
+
+ server =
+ rpc.getServer(MRClientProtocol.class, this, address,
+ conf, null, 1);
+ server.start();
+ this.bindAddress =
+ NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
+ + ":" + server.getPort());
+ super.start();
+ }
+
+ public void stop() {
+ server.close();
+ super.stop();
+ }
+
+ @Override
+ public GetCountersResponse getCounters(GetCountersRequest request)
+ throws YarnRemoteException {
+ JobId jobID = request.getJobId();
+
+ amContact = true;
+
+ Counters counters = getMyCounters();
+ GetCountersResponse response = recordFactory
+ .newRecordInstance(GetCountersResponse.class);
+ response.setCounters(counters);
+ return response;
+ }
+
+ @Override
+ public GetJobReportResponse getJobReport(GetJobReportRequest request)
+ throws YarnRemoteException {
+
+ amContact = true;
+
+ JobReport jobReport = recordFactory.newRecordInstance(JobReport.class);
+ jobReport.setJobId(request.getJobId());
+ jobReport.setJobState(JobState.RUNNING);
+ GetJobReportResponse response = recordFactory
+ .newRecordInstance(GetJobReportResponse.class);
+ response.setJobReport(jobReport);
+ return response;
+ }
+
+ @Override
+ public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
+ throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetTaskAttemptReportResponse getTaskAttemptReport(
+ GetTaskAttemptReportRequest request) throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetTaskAttemptCompletionEventsResponse
+ getTaskAttemptCompletionEvents(
+ GetTaskAttemptCompletionEventsRequest request)
+ throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetTaskReportsResponse
+ getTaskReports(GetTaskReportsRequest request)
+ throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public GetDiagnosticsResponse
+ getDiagnostics(GetDiagnosticsRequest request)
+ throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public KillJobResponse killJob(KillJobRequest request)
+ throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public KillTaskResponse killTask(KillTaskRequest request)
+ throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public KillTaskAttemptResponse killTaskAttempt(
+ KillTaskAttemptRequest request) throws YarnRemoteException {
+ return null;
+ }
+
+ @Override
+ public FailTaskAttemptResponse failTaskAttempt(
+ FailTaskAttemptRequest request) throws YarnRemoteException {
+ return null;
+ }
+ }
+
+ static Counters getMyCounters() {
+ Counter counter = recordFactory.newRecordInstance(Counter.class);
+ counter.setName("Mycounter");
+ counter.setDisplayName("My counter display name");
+ counter.setValue(12345);
+
+ CounterGroup group = recordFactory
+ .newRecordInstance(CounterGroup.class);
+ group.setName("MyGroup");
+ group.setDisplayName("My groupd display name");
+ group.setCounter("myCounter", counter);
+
+ Counters counters = recordFactory.newRecordInstance(Counters.class);
+ counters.setCounterGroup("myGroupd", group);
+ return counters;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
new file mode 100644
index 0000000..4155858
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
@@ -0,0 +1,125 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.ShuffleHandler;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.service.Service;
+
+/**
+ * Configures and starts the MR-specific components in the YARN cluster.
+ */
+public class MiniMRYarnCluster extends MiniYARNCluster {
+
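+ // Relative path to the MRAppMaster jar built by the client-app module;
+ // tests skip themselves when it has not been built.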
+ public static final String APPJAR =
+ "../hadoop-mapreduce-client-app/target/"
+ + MRConstants.HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME;
+
+ private static final Log LOG = LogFactory.getLog(MiniMRYarnCluster.class);
+ private JobHistoryServer historyServer;
+ private JobHistoryServerWrapper historyServerWrapper;
+
+ public MiniMRYarnCluster(String testName) {
+ super(testName);
+ //TODO: add the history server
+ historyServerWrapper = new JobHistoryServerWrapper();
+ addService(historyServerWrapper);
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ conf.set(MRConfig.FRAMEWORK_NAME, "yarn");
+ conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
+ conf.set(MRConstants.APPS_STAGING_DIR_KEY, new File(getTestWorkDir(),
+ "apps_staging_dir/${user.name}/").getAbsolutePath());
+ conf.set(MRConfig.MASTER_ADDRESS, "test"); // The default is local because of
+ // which shuffle doesn't happen
+ //configure the shuffle service in NM
+ conf.setStrings(AuxServices.AUX_SERVICES,
+ new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
+ conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT,
+ ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class,
+ Service.class);
+ conf.setClass(NMConfig.NM_CONTAINER_EXECUTOR_CLASS,
+ DefaultContainerExecutor.class, ContainerExecutor.class);
+
+ // TestMRJobs is for testing non-uberized operation only; see TestUberAM
+ // for corresponding uberized tests.
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+
+ super.init(conf);
+ }
+
+ private class JobHistoryServerWrapper extends AbstractService {
+ public JobHistoryServerWrapper() {
+ super(JobHistoryServerWrapper.class.getName());
+ }
+
+ @Override
+ public synchronized void start() {
+ try {
+ historyServer = new JobHistoryServer();
+ historyServer.init(getConfig());
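+ // Start the history server on its own thread, then poll below until its
+ // service state leaves INITED.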
+ new Thread() {
+ public void run() {
+ historyServer.start();
+ }
+ }.start();
+ while (historyServer.getServiceState() == STATE.INITED) {
+ LOG.info("Waiting for HistoryServer to start...");
+ Thread.sleep(1500);
+ }
+ if (historyServer.getServiceState() != STATE.STARTED) {
+ throw new IOException("HistoryServer failed to start");
+ }
+ super.start();
+ } catch (Throwable t) {
+ throw new YarnException(t);
+ }
+ }
+
+ @Override
+ public synchronized void stop() {
+ if (historyServer != null) {
+ historyServer.stop();
+ }
+ super.stop();
+ }
+ }
+
+ public JobHistoryServer getHistoryServer() {
+ return this.historyServer;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
new file mode 100644
index 0000000..fae2aa0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
@@ -0,0 +1,458 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.security.PrivilegedExceptionAction;
+import java.util.jar.JarOutputStream;
+import java.util.zip.ZipEntry;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.FailingMapper;
+import org.apache.hadoop.RandomTextWriterJob;
+import org.apache.hadoop.RandomTextWriterJob.RandomInputFormat;
+import org.apache.hadoop.SleepJob;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskCompletionEvent;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.YarnServerConfig;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestMRJobs {
+
+ private static final Log LOG = LogFactory.getLog(TestMRJobs.class);
+
+ protected static MiniMRYarnCluster mrCluster;
+
+ private static Configuration conf = new Configuration();
+ private static FileSystem localFs;
+ static {
+ try {
+ localFs = FileSystem.getLocal(conf);
+ } catch (IOException io) {
+ throw new RuntimeException("problem getting local fs", io);
+ }
+ }
+
+ private static Path TEST_ROOT_DIR = new Path("target",
+ TestMRJobs.class.getName() + "-tmpDir").makeQualified(localFs);
+ static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
+
+ @BeforeClass
+ public static void setup() throws IOException {
+
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ if (mrCluster == null) {
+ mrCluster = new MiniMRYarnCluster(TestMRJobs.class.getName());
+ mrCluster.init(new Configuration());
+ mrCluster.start();
+ }
+
+ // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
+ // work around the absent public dist cache.
+ localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
+ localFs.setPermission(APP_JAR, new FsPermission("700"));
+ }
+
+ @AfterClass
+ public static void tearDown() {
+ if (mrCluster != null) {
+ mrCluster.stop();
+ mrCluster = null;
+ }
+ }
+
+ @Test
+ public void testSleepJob() throws IOException, InterruptedException,
+ ClassNotFoundException {
+ LOG.info("\n\n\nStarting testSleepJob().");
+
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ SleepJob sleepJob = new SleepJob();
+ sleepJob.setConf(mrCluster.getConfig());
+
+ int numReduces = mrCluster.getConfig().getInt("TestMRJobs.testSleepJob.reduces", 2); // or mrCluster.getConfig().getInt(MRJobConfig.NUM_REDUCES, 2);
+
+ // job with 3 maps (10s) and numReduces reduces (5s), 1 "record" each:
+ Job job = sleepJob.createJob(3, numReduces, 10000, 1, 5000, 1);
+
+ job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
+ job.setJarByClass(SleepJob.class);
+ job.setMaxMapAttempts(1); // speed up failures
+ boolean succeeded = job.waitForCompletion(true);
+ Assert.assertTrue(succeeded);
+ Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
+ verifySleepJobCounters(job);
+
+ // TODO later: add explicit "isUber()" checks of some sort (extend
+ // JobStatus?)--compare against MRJobConfig.JOB_UBERTASK_ENABLE value
+ }
+
+ protected void verifySleepJobCounters(Job job) throws InterruptedException,
+ IOException {
+ Counters counters = job.getCounters();
+ Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
+ .getValue());
+ Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
+ .getValue());
+ Assert.assertEquals(2,
+ counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
+ Assert
+ .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
+ && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
+ Assert
+ .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES) != null
+ && counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue() != 0);
+ }
+
+ @Test
+ public void testRandomWriter() throws IOException, InterruptedException,
+ ClassNotFoundException {
+
+ LOG.info("\n\n\nStarting testRandomWriter().");
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ RandomTextWriterJob randomWriterJob = new RandomTextWriterJob();
+ mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES, "3072");
+ mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP, "1024");
+ Job job = randomWriterJob.createJob(mrCluster.getConfig());
+ Path outputDir =
+ new Path(mrCluster.getTestWorkDir().getAbsolutePath(), "random-output");
+ FileOutputFormat.setOutputPath(job, outputDir);
+ job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
+ job.setJarByClass(RandomTextWriterJob.class);
+ job.setMaxMapAttempts(1); // speed up failures
+ boolean succeeded = job.waitForCompletion(true);
+ Assert.assertTrue(succeeded);
+ Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
+ // Make sure there are three files in the output-dir
+
+ RemoteIterator<FileStatus> iterator =
+ FileContext.getFileContext(mrCluster.getConfig()).listStatus(
+ outputDir);
+ int count = 0;
+ while (iterator.hasNext()) {
+ FileStatus file = iterator.next();
+ if (!file.getPath().getName()
+ .equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) {
+ count++;
+ }
+ }
+ Assert.assertEquals("Number of part files is wrong!", 3, count);
+ verifyRandomWriterCounters(job);
+
+ // TODO later: add explicit "isUber()" checks of some sort
+ }
+
+ protected void verifyRandomWriterCounters(Job job)
+ throws InterruptedException, IOException {
+ Counters counters = job.getCounters();
+ Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
+ .getValue());
+ Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
+ .getValue());
+ Assert
+ .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
+ && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
+ }
+
+ @Test
+ public void testFailingMapper() throws IOException, InterruptedException,
+ ClassNotFoundException {
+
+ LOG.info("\n\n\nStarting testFailingMapper().");
+
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ Job job = runFailingMapperJob();
+
+ TaskID taskID = new TaskID(job.getJobID(), TaskType.MAP, 0);
+ TaskAttemptID aId = new TaskAttemptID(taskID, 0);
+ System.out.println("Diagnostics for " + aId + " :");
+ for (String diag : job.getTaskDiagnostics(aId)) {
+ System.out.println(diag);
+ }
+ aId = new TaskAttemptID(taskID, 1);
+ System.out.println("Diagnostics for " + aId + " :");
+ for (String diag : job.getTaskDiagnostics(aId)) {
+ System.out.println(diag);
+ }
+
+ TaskCompletionEvent[] events = job.getTaskCompletionEvents(0, 2);
+ Assert.assertEquals(TaskCompletionEvent.Status.FAILED,
+ events[0].getStatus());
+ Assert.assertEquals(TaskCompletionEvent.Status.FAILED,
+ events[1].getStatus());
+ Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());
+ verifyFailingMapperCounters(job);
+
+ // TODO later: add explicit "isUber()" checks of some sort
+ }
+
+ protected void verifyFailingMapperCounters(Job job)
+ throws InterruptedException, IOException {
+ Counters counters = job.getCounters();
+ Assert.assertEquals(2, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
+ .getValue());
+ Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
+ .getValue());
+ Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
+ .getValue());
+ Assert
+ .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
+ && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
+ }
+
+ protected Job runFailingMapperJob()
+ throws IOException, InterruptedException, ClassNotFoundException {
+ Configuration myConf = new Configuration(mrCluster.getConfig());
+ myConf.setInt(MRJobConfig.NUM_MAPS, 1);
+ myConf.setInt("mapreduce.task.timeout", 10*1000);//reduce the timeout
+ myConf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 2); //reduce the number of attempts
+
+ Job job = new Job(myConf);
+
+ job.setJarByClass(FailingMapper.class);
+ job.setJobName("failmapper");
+ job.setOutputKeyClass(Text.class);
+ job.setOutputValueClass(Text.class);
+ job.setInputFormatClass(RandomInputFormat.class);
+ job.setOutputFormatClass(TextOutputFormat.class);
+ job.setMapperClass(FailingMapper.class);
+ job.setNumReduceTasks(0);
+
+ FileOutputFormat.setOutputPath(job,
+ new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
+ "failmapper-output"));
+ job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
+ boolean succeeded = job.waitForCompletion(true);
+ Assert.assertFalse(succeeded);
+
+ return job;
+ }
+
+//@Test
+ public void testSleepJobWithSecurityOn() throws IOException,
+ InterruptedException, ClassNotFoundException {
+
+ LOG.info("\n\n\nStarting testSleepJobWithSecurityOn().");
+
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ return;
+ }
+
+ mrCluster.getConfig().set(
+ CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+ mrCluster.getConfig().set(RMConfig.RM_KEYTAB, "/etc/krb5.keytab");
+ mrCluster.getConfig().set(NMConfig.NM_KEYTAB, "/etc/krb5.keytab");
+ mrCluster.getConfig().set(YarnConfiguration.RM_SERVER_PRINCIPAL_KEY,
+ "rm/sightbusy-lx@LOCALHOST");
+ mrCluster.getConfig().set(YarnServerConfig.NM_SERVER_PRINCIPAL_KEY,
+ "nm/sightbusy-lx@LOCALHOST");
+ UserGroupInformation.setConfiguration(mrCluster.getConfig());
+
+ // Keep it in here instead of after RM/NM as multiple user logins happen in
+ // the same JVM.
+ UserGroupInformation user = UserGroupInformation.getCurrentUser();
+
+ LOG.info("User name is " + user.getUserName());
+ for (Token<? extends TokenIdentifier> str : user.getTokens()) {
+ LOG.info("Token is " + str.encodeToUrlString());
+ }
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ SleepJob sleepJob = new SleepJob();
+ sleepJob.setConf(mrCluster.getConfig());
+ Job job = sleepJob.createJob(3, 0, 10000, 1, 0, 0);
+ // //Job with reduces
+ // Job job = sleepJob.createJob(3, 2, 10000, 1, 10000, 1);
+ job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
+ job.waitForCompletion(true);
+ Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
+ return null;
+ }
+ });
+
+ // TODO later: add explicit "isUber()" checks of some sort
+ }
+
+ public static class DistributedCacheChecker extends
+ Mapper<LongWritable, Text, NullWritable, NullWritable> {
+
+ @Override
+ public void setup(Context context) throws IOException {
+ Configuration conf = context.getConfiguration();
+ Path[] files = context.getLocalCacheFiles();
+ Path[] archives = context.getLocalCacheArchives();
+ FileSystem fs = LocalFileSystem.get(conf);
+
+ // Check that 3 files (2 + the app jar) and 2 archives are present
+ Assert.assertEquals(3, files.length);
+ Assert.assertEquals(2, archives.length);
+
+ // Check lengths of the files
+ Assert.assertEquals(1, fs.getFileStatus(files[0]).getLen());
+ Assert.assertTrue(fs.getFileStatus(files[1]).getLen() > 1);
+
+ // Check extraction of the archive
+ Assert.assertTrue(fs.exists(new Path(archives[0],
+ "distributed.jar.inside3")));
+ Assert.assertTrue(fs.exists(new Path(archives[1],
+ "distributed.jar.inside4")));
+
+ // Check the class loaders
+ LOG.info("Java Classpath: " + System.getProperty("java.class.path"));
+ ClassLoader cl = Thread.currentThread().getContextClassLoader();
+ // Both the second jar (file on classpath) and the third jar (archive on
+ // classpath) should be reachable via the class loader, while the plain
+ // cache archive (the fourth jar) should not be.
+ Assert.assertNotNull(cl.getResource("distributed.jar.inside2"));
+ Assert.assertNotNull(cl.getResource("distributed.jar.inside3"));
+ Assert.assertNull(cl.getResource("distributed.jar.inside4"));
+
+ // Check that the symlink for the renaming was created in the cwd.
+ File symlinkFile = new File("distributed.first.symlink");
+ Assert.assertTrue(symlinkFile.exists());
+ Assert.assertEquals(1, symlinkFile.length());
+ }
+ }
+
+ @Test
+ public void testDistributedCache() throws Exception {
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ // Create a temporary file of length 1.
+ Path first = createTempFile("distributed.first", "x");
+ // Create two jars with a single file inside them.
+ Path second =
+ makeJar(new Path(TEST_ROOT_DIR, "distributed.second.jar"), 2);
+ Path third =
+ makeJar(new Path(TEST_ROOT_DIR, "distributed.third.jar"), 3);
+ Path fourth =
+ makeJar(new Path(TEST_ROOT_DIR, "distributed.fourth.jar"), 4);
+
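+ // Distribution plan: "first" is a plain cache file (with a symlink),
+ // "second" a file on the classpath, "third" an archive on the classpath,
+ // and "fourth" a plain cache archive; DistributedCacheChecker above
+ // asserts the visibility of each.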
+ Job job = Job.getInstance(mrCluster.getConfig());
+ job.setJarByClass(DistributedCacheChecker.class);
+ job.setMapperClass(DistributedCacheChecker.class);
+ job.setOutputFormatClass(NullOutputFormat.class);
+
+ FileInputFormat.setInputPaths(job, first);
+ // Put the first file in the distributed cache with a symlink name.
+ job.addCacheFile(
+ new URI(first.toUri().toString() + "#distributed.first.symlink"));
+ job.addFileToClassPath(second);
+ job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
+ job.addArchiveToClassPath(third);
+ job.addCacheArchive(fourth.toUri());
+ job.createSymlink();
+ job.setMaxMapAttempts(1); // speed up failures
+
+ job.submit();
+ Assert.assertTrue(job.waitForCompletion(false));
+ }
+
+ private Path createTempFile(String filename, String contents)
+ throws IOException {
+ Path path = new Path(TEST_ROOT_DIR, filename);
+ FSDataOutputStream os = localFs.create(path);
+ os.writeBytes(contents);
+ os.close();
+ localFs.setPermission(path, new FsPermission("700"));
+ return path;
+ }
+
+ private Path makeJar(Path p, int index) throws FileNotFoundException,
+ IOException {
+ FileOutputStream fos =
+ new FileOutputStream(new File(p.toUri().getPath()));
+ JarOutputStream jos = new JarOutputStream(fos);
+ ZipEntry ze = new ZipEntry("distributed.jar.inside" + index);
+ jos.putNextEntry(ze);
+ jos.write(("inside the jar!" + index).getBytes());
+ jos.closeEntry();
+ jos.close();
+ localFs.setPermission(p, new FsPermission("700"));
+ return p;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
new file mode 100644
index 0000000..a5315a5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
@@ -0,0 +1,132 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.avro.ipc.AvroRemoteException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.FailingMapper;
+import org.apache.hadoop.SleepJob;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.Test;
+
+public class TestMRJobsWithHistoryService {
+
+ private static final Log LOG =
+ LogFactory.getLog(TestMRJobsWithHistoryService.class);
+
+ private static MiniMRYarnCluster mrCluster;
+
+ private static Configuration conf = new Configuration();
+ private static FileSystem localFs;
+ static {
+ try {
+ localFs = FileSystem.getLocal(conf);
+ } catch (IOException io) {
+ throw new RuntimeException("problem getting local fs", io);
+ }
+ }
+
+ private static Path TEST_ROOT_DIR = new Path("target",
+ TestMRJobs.class.getName() + "-tmpDir").makeQualified(localFs);
+ static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
+
+ @Before
+ public void setup() throws InterruptedException, IOException {
+
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ if (mrCluster == null) {
+ mrCluster = new MiniMRYarnCluster(getClass().getName());
+ mrCluster.init(new Configuration());
+ mrCluster.start();
+ }
+
+ // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
+ // work around the absent public dist cache.
+ localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
+ localFs.setPermission(APP_JAR, new FsPermission("700"));
+ }
+
+ @After
+ public void tearDown() {
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ if (mrCluster != null) {
+ mrCluster.stop();
+ }
+ }
+
+ @Test
+ public void testJobHistoryData() throws IOException, InterruptedException,
+ AvroRemoteException, ClassNotFoundException {
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ SleepJob sleepJob = new SleepJob();
+ sleepJob.setConf(mrCluster.getConfig());
+ // Job with 3 maps and 2 reduces
+ Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
+ job.setJarByClass(SleepJob.class);
+ job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
+ job.waitForCompletion(true);
+ Counters counterMR = job.getCounters();
+ ApplicationId appID = TypeConverter.toYarn(job.getJobID()).getAppId();
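+ // Poll the RM until the app is FINISHED so the completed-job history is
+ // available before fetching the counters again (this time via the history
+ // server).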
+ while (true) {
+ Thread.sleep(1000);
+ if (mrCluster.getResourceManager().getRMContext().getRMApps()
+ .get(appID).getState().equals(RMAppState.FINISHED))
+ break;
+ }
+ Counters counterHS = job.getCounters();
+ // TODO: the assert below worked; need to check whether we should compare
+ // each field or convert to v2 counters and then compare.
+ LOG.info("CounterHS " + counterHS);
+ LOG.info("CounterMR " + counterMR);
+ Assert.assertEquals(counterHS, counterMR);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMROldApiJobs.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMROldApiJobs.java
new file mode 100644
index 0000000..79a14fc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMROldApiJobs.java
@@ -0,0 +1,215 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2;
+
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.CustomOutputCommitter;
+import org.apache.hadoop.FailMapper;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.FileOutputFormat;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hadoop.mapred.lib.IdentityMapper;
+import org.apache.hadoop.mapred.lib.IdentityReducer;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestMROldApiJobs {
+
+ private static final Log LOG = LogFactory.getLog(TestMROldApiJobs.class);
+
+ protected static MiniMRYarnCluster mrCluster;
+ private static Configuration conf = new Configuration();
+ private static FileSystem localFs;
+ static {
+ try {
+ localFs = FileSystem.getLocal(conf);
+ } catch (IOException io) {
+ throw new RuntimeException("problem getting local fs", io);
+ }
+ }
+
+ @BeforeClass
+ public static void setup() throws IOException {
+
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ if (mrCluster == null) {
+ mrCluster = new MiniMRYarnCluster(TestMROldApiJobs.class.getName());
+ mrCluster.init(new Configuration());
+ mrCluster.start();
+ }
+
+ // This class is for testing non-uberized operation only; see TestUberAM
+ // for corresponding uberized tests.
+ mrCluster.getConfig().setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+
+ // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
+ // work around the absent public dist cache.
+ localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), TestMRJobs.APP_JAR);
+ localFs.setPermission(TestMRJobs.APP_JAR, new FsPermission("700"));
+ }
+
+ @AfterClass
+ public static void tearDown() {
+ if (mrCluster != null) {
+ mrCluster.stop();
+ mrCluster = null;
+ }
+ }
+
+ @Test
+ public void testJobSucceed() throws IOException, InterruptedException,
+ ClassNotFoundException {
+
+ LOG.info("\n\n\nStarting testJobSucceed().");
+
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ JobConf conf = new JobConf(mrCluster.getConfig());
+
+ Path in = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
+ "in");
+ Path out = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
+ "out");
+ runJobSucceed(conf, in, out);
+
+ FileSystem fs = FileSystem.get(conf);
+ Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.JOB_SETUP_FILE_NAME)));
+ Assert.assertFalse(fs.exists(new Path(out, CustomOutputCommitter.JOB_ABORT_FILE_NAME)));
+ Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.JOB_COMMIT_FILE_NAME)));
+ Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.TASK_SETUP_FILE_NAME)));
+ Assert.assertFalse(fs.exists(new Path(out, CustomOutputCommitter.TASK_ABORT_FILE_NAME)));
+ Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.TASK_COMMIT_FILE_NAME)));
+ }
+
+ @Test
+ public void testJobFail() throws IOException, InterruptedException,
+ ClassNotFoundException {
+
+ LOG.info("\n\n\nStarting testJobFail().");
+
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ JobConf conf = new JobConf(mrCluster.getConfig());
+
+ Path in = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
+ "fail-in");
+ Path out = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
+ "fail-out");
+ runJobFail(conf, in, out);
+
+ FileSystem fs = FileSystem.get(conf);
+ Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.JOB_SETUP_FILE_NAME)));
+ Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.JOB_ABORT_FILE_NAME)));
+ Assert.assertFalse(fs.exists(new Path(out, CustomOutputCommitter.JOB_COMMIT_FILE_NAME)));
+ Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.TASK_SETUP_FILE_NAME)));
+ Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.TASK_ABORT_FILE_NAME)));
+ Assert.assertFalse(fs.exists(new Path(out, CustomOutputCommitter.TASK_COMMIT_FILE_NAME)));
+ }
+
+ // Run a job that is expected to fail and wait until it completes
+ public static void runJobFail(JobConf conf, Path inDir, Path outDir)
+ throws IOException, InterruptedException {
+ conf.setJobName("test-job-fail");
+ conf.setMapperClass(FailMapper.class);
+ conf.setJarByClass(FailMapper.class);
+ conf.setReducerClass(IdentityReducer.class);
+ conf.setMaxMapAttempts(1);
+
+ boolean success = runJob(conf, inDir, outDir, 1, 0);
+ Assert.assertFalse("Job expected to fail succeeded", success);
+ }
+
+ // Run a job that is expected to succeed and wait until it completes
+ public static void runJobSucceed(JobConf conf, Path inDir, Path outDir)
+ throws IOException, InterruptedException {
+ conf.setJobName("test-job-succeed");
+ conf.setMapperClass(IdentityMapper.class);
+ //conf.setJar(new File(MiniMRYarnCluster.APPJAR).getAbsolutePath());
+ conf.setReducerClass(IdentityReducer.class);
+
+ boolean success = runJob(conf, inDir, outDir, 1, 1);
+ Assert.assertTrue("Job that was expected to succeed failed", success);
+ }
+
+ static boolean runJob(JobConf conf, Path inDir, Path outDir, int numMaps,
+ int numReds) throws IOException, InterruptedException {
+
+ FileSystem fs = FileSystem.get(conf);
+ if (fs.exists(outDir)) {
+ fs.delete(outDir, true);
+ }
+ if (!fs.exists(inDir)) {
+ fs.mkdirs(inDir);
+ }
+ String input = "The quick brown fox\n" + "has many silly\n"
+ + "red fox sox\n";
+ for (int i = 0; i < numMaps; ++i) {
+ DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
+ file.writeBytes(input);
+ file.close();
+ }
+
+ DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf);
+ conf.setOutputCommitter(CustomOutputCommitter.class);
+ conf.setInputFormat(TextInputFormat.class);
+ conf.setOutputKeyClass(LongWritable.class);
+ conf.setOutputValueClass(Text.class);
+
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+ conf.setNumMapTasks(numMaps);
+ conf.setNumReduceTasks(numReds);
+
+ JobClient jobClient = new JobClient(conf);
+
+ RunningJob job = jobClient.submitJob(conf);
+ return jobClient.monitorAndPrintJob(conf, job);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestUberAM.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestUberAM.java
new file mode 100644
index 0000000..314f977
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestUberAM.java
@@ -0,0 +1,185 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskCompletionEvent;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+@Ignore
+public class TestUberAM extends TestMRJobs {
+
+ private static final Log LOG = LogFactory.getLog(TestUberAM.class);
+
+ @BeforeClass
+ public static void setup() throws IOException {
+ TestMRJobs.setup();
+ if (mrCluster != null) {
+ mrCluster.getConfig().setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
+ }
+ }
+
+ @Override
+ public void testSleepJob()
+ throws IOException, InterruptedException, ClassNotFoundException {
+ if (mrCluster != null) {
+ mrCluster.getConfig().setInt("TestMRJobs.testSleepJob.reduces", 1);
+ }
+ super.testSleepJob();
+ }
+
+ @Override
+ protected void verifySleepJobCounters(Job job) throws InterruptedException,
+ IOException {
+ Counters counters = job.getCounters();
+
+ Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
+ .getValue());
+ Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
+ .getValue());
+ Assert.assertEquals(1,
+ counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
+ Assert
+ .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
+ && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
+ Assert
+ .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES) != null
+ && counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue() != 0);
+
+ Assert.assertEquals(3, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
+ .getValue());
+ Assert.assertEquals(1, counters.findCounter(JobCounter.NUM_UBER_SUBREDUCES)
+ .getValue());
+ Assert.assertEquals(4,
+ counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
+ }
+
+ @Override
+ public void testRandomWriter()
+ throws IOException, InterruptedException, ClassNotFoundException {
+ super.testRandomWriter();
+ }
+
+ @Override
+ protected void verifyRandomWriterCounters(Job job)
+ throws InterruptedException, IOException {
+ super.verifyRandomWriterCounters(job);
+ Counters counters = job.getCounters();
+ Assert.assertEquals(3, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
+ .getValue());
+ Assert.assertEquals(3,
+ counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
+ }
+
+ @Override
+ public void testFailingMapper()
+ throws IOException, InterruptedException, ClassNotFoundException {
+ LOG.info("\n\n\nStarting uberized testFailingMapper().");
+
+ if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
+ LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ + " not found. Not running test.");
+ return;
+ }
+
+ Job job = runFailingMapperJob();
+
+ // should be able to get diagnostics for the single task attempt...
+ TaskID taskID = new TaskID(job.getJobID(), TaskType.MAP, 0);
+ TaskAttemptID aId = new TaskAttemptID(taskID, 0);
+ System.out.println("Diagnostics for " + aId + " :");
+ for (String diag : job.getTaskDiagnostics(aId)) {
+ System.out.println(diag);
+ }
+ // ...but not for the second (it shouldn't exist: the uber-AM overrode max attempts)
+ boolean secondTaskAttemptExists = true;
+ try {
+ aId = new TaskAttemptID(taskID, 1);
+ System.out.println("Diagnostics for " + aId + " :");
+ for (String diag : job.getTaskDiagnostics(aId)) {
+ System.out.println(diag);
+ }
+ } catch (Exception e) {
+ secondTaskAttemptExists = false;
+ }
+ Assert.assertEquals(false, secondTaskAttemptExists);
+
+ TaskCompletionEvent[] events = job.getTaskCompletionEvents(0, 2);
+ Assert.assertEquals(1, events.length);
+ Assert.assertEquals(TaskCompletionEvent.Status.FAILED,
+ events[0].getStatus());
+ Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());
+
+ //Disabling till UberAM honors MRJobConfig.MAP_MAX_ATTEMPTS
+ //verifyFailingMapperCounters(job);
+
+ // TODO later: add explicit "isUber()" checks of some sort
+ }
+
+ @Override
+ protected void verifyFailingMapperCounters(Job job)
+ throws InterruptedException, IOException {
+ Counters counters = job.getCounters();
+ Assert.assertEquals(2, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
+ .getValue());
+ Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
+ .getValue());
+ Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
+ .getValue());
+ Assert
+ .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
+ && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
+
+ Assert.assertEquals(2,
+ counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
+ Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
+ .getValue());
+ Assert.assertEquals(2, counters
+ .findCounter(JobCounter.NUM_FAILED_UBERTASKS).getValue());
+ }
+
+//@Test //FIXME: if/when the corresponding TestMRJobs test gets enabled, do so here as well (potentially with mods for ubermode)
+ public void testSleepJobWithSecurityOn()
+ throws IOException, InterruptedException, ClassNotFoundException {
+ super.testSleepJobWithSecurityOn();
+ }
+
+ // TODO: add a test for the distributed cache when uber mode is enabled.
+ @Override
+ @Test
+ public void testDistributedCache() throws Exception {
+ // Intentionally a no-op; see the TODO above.
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-shuffle/pom.xml
new file mode 100644
index 0000000..dd67aef
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-shuffle/pom.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-mapreduce-client</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${hadoop-mapreduce.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
+ <name>hadoop-mapreduce-client-shuffle</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <mr.basedir>${project.parent.parent.basedir}</mr.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-nodemanager</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-core</artifactId>
+ </dependency>
+ </dependencies>
+
+</project>
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
new file mode 100644
index 0000000..e2620b3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -0,0 +1,461 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import static org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer;
+import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.CONTENT_TYPE;
+import static org.jboss.netty.handler.codec.http.HttpMethod.GET;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.METHOD_NOT_ALLOWED;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.UNAUTHORIZED;
+import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import javax.crypto.SecretKey;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputByteBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFactory;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.DefaultFileRegion;
+import org.jboss.netty.channel.ExceptionEvent;
+import org.jboss.netty.channel.FileRegion;
+import org.jboss.netty.channel.MessageEvent;
+import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
+import org.jboss.netty.channel.group.ChannelGroup;
+import org.jboss.netty.channel.group.DefaultChannelGroup;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.jboss.netty.handler.codec.frame.TooLongFrameException;
+import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
+import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
+import org.jboss.netty.handler.codec.http.HttpRequest;
+import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+import org.jboss.netty.handler.codec.http.QueryStringDecoder;
+import org.jboss.netty.handler.stream.ChunkedWriteHandler;
+import org.jboss.netty.util.CharsetUtil;
+
+public class ShuffleHandler extends AbstractService
+ implements AuxServices.AuxiliaryService {
+
+ private static final Log LOG = LogFactory.getLog(ShuffleHandler.class);
+
+ private int port;
+ private ChannelFactory selector;
+ private final ChannelGroup accepted = new DefaultChannelGroup();
+
+ public static final String MAPREDUCE_SHUFFLE_SERVICEID =
+ "mapreduce.shuffle";
+
+ private static final Map<String,String> userRsrc =
+ new ConcurrentHashMap<String,String>();
+ private static final JobTokenSecretManager secretManager =
+ new JobTokenSecretManager();
+
+ public static final String SHUFFLE_PORT = "mapreduce.shuffle.port";
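+ // Set in the NM's configuration; an illustrative yarn-site.xml entry:
+ //   <property>
+ //     <name>mapreduce.shuffle.port</name>
+ //     <value>8080</value>
+ //   </property>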
+
+ @Metrics(about="Shuffle output metrics", context="mapred")
+ static class ShuffleMetrics implements ChannelFutureListener {
+ @Metric("Shuffle output in bytes")
+ MutableCounterLong shuffleOutputBytes;
+ @Metric("# of failed shuffle outputs")
+ MutableCounterInt shuffleOutputsFailed;
+ @Metric("# of succeeeded shuffle outputs")
+ MutableCounterInt shuffleOutputsOK;
+ @Metric("# of current shuffle connections")
+ MutableGaugeInt shuffleConnections;
+
+ @Override
+ public void operationComplete(ChannelFuture future) throws Exception {
+ if (future.isSuccess()) {
+ shuffleOutputsOK.incr();
+ } else {
+ shuffleOutputsFailed.incr();
+ }
+ shuffleConnections.decr();
+ }
+ }
+
+ final ShuffleMetrics metrics;
+
+ ShuffleHandler(MetricsSystem ms) {
+ super("httpshuffle");
+ metrics = ms.register(new ShuffleMetrics());
+ }
+
+ public ShuffleHandler() {
+ this(DefaultMetricsSystem.instance());
+ }
+
+ @Override
+ public void initApp(String user, ApplicationId appId, ByteBuffer secret) {
+ // TODO these bytes should be versioned
+ try {
+ DataInputByteBuffer in = new DataInputByteBuffer();
+ in.reset(secret);
+ Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>();
+ jt.readFields(in);
+ // TODO: Once Shuffle is out of NM, this can use MR APIs
+ JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
+ userRsrc.put(jobId.toString(), user);
+ LOG.info("Added token for " + jobId.toString());
+ secretManager.addTokenForJob(jobId.toString(), jt);
+ } catch (IOException e) {
+ LOG.error("Error during initApp", e);
+ // TODO add API to AuxiliaryServices to report failures
+ }
+ }
+
+ @Override
+ public void stopApp(ApplicationId appId) {
+ JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
+ secretManager.removeTokenForJob(jobId.toString());
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+ selector = new NioServerSocketChannelFactory(
+ Executors.newCachedThreadPool(), Executors.newCachedThreadPool());
+ super.init(new Configuration(conf));
+ }
+
+ // TODO change AbstractService to throw InterruptedException
+ @Override
+ public synchronized void start() {
+ Configuration conf = getConfig();
+ ServerBootstrap bootstrap = new ServerBootstrap(selector);
+ bootstrap.setPipelineFactory(new HttpPipelineFactory(conf));
+ port = conf.getInt(SHUFFLE_PORT, 8080);
+ accepted.add(bootstrap.bind(new InetSocketAddress(port)));
+ LOG.info(getName() + " listening on port " + port);
+ super.start();
+ }
+
+ @Override
+ public synchronized void stop() {
+ accepted.close().awaitUninterruptibly(10, TimeUnit.SECONDS);
+ selector.releaseExternalResources();
+ super.stop();
+ }
+
+ Shuffle createShuffle() {
+ return new Shuffle(getConfig());
+ }
+
+ class HttpPipelineFactory implements ChannelPipelineFactory {
+
+ final Shuffle SHUFFLE;
+
+ public HttpPipelineFactory(Configuration conf) {
+ SHUFFLE = new Shuffle(conf);
+ }
+
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ return Channels.pipeline(
+ new HttpRequestDecoder(),
+ new HttpChunkAggregator(1 << 16),
+ new HttpResponseEncoder(),
+ new ChunkedWriteHandler(),
+ SHUFFLE);
+ // TODO factor security manager into pipeline
+ // TODO factor out encode/decode to permit binary shuffle
+ // TODO factor out decode of index to permit alt. models
+ }
+
+ }
+
+ class Shuffle extends SimpleChannelUpstreamHandler {
+
+ private final Configuration conf;
+ private final IndexCache indexCache;
+ private final LocalDirAllocator lDirAlloc =
+ new LocalDirAllocator(NMConfig.NM_LOCAL_DIR);
+
+ public Shuffle(Configuration conf) {
+ this.conf = conf;
+ indexCache = new IndexCache(new JobConf(conf));
+ }
+
+ private List<String> splitMaps(List<String> mapq) {
+ if (null == mapq) {
+ return null;
+ }
+ final List<String> ret = new ArrayList<String>();
+ for (String s : mapq) {
+ Collections.addAll(ret, s.split(","));
+ }
+ return ret;
+ }
+
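+ // Request format sketch (the path is illustrative; only the query
+ // string is parsed here):
+ //   GET /mapOutput?job=<jobId>&reduce=<partition>&map=<id1>,<id2>,...
+ // The "map" parameter may repeat, and each value may itself be a
+ // comma-separated list of map IDs.
+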
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt)
+ throws Exception {
+ HttpRequest request = (HttpRequest) evt.getMessage();
+ if (request.getMethod() != GET) {
+ sendError(ctx, METHOD_NOT_ALLOWED);
+ return;
+ }
+ final Map<String,List<String>> q =
+ new QueryStringDecoder(request.getUri()).getParameters();
+ final List<String> mapIds = splitMaps(q.get("map"));
+ final List<String> reduceQ = q.get("reduce");
+ final List<String> jobQ = q.get("job");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("RECV: " + request.getUri() +
+ "\n mapId: " + mapIds +
+ "\n reduceId: " + reduceQ +
+ "\n jobId: " + jobQ);
+ }
+
+ if (mapIds == null || reduceQ == null || jobQ == null) {
+ sendError(ctx, "Required param job, map and reduce", BAD_REQUEST);
+ return;
+ }
+ if (reduceQ.size() != 1 || jobQ.size() != 1) {
+ sendError(ctx, "Too many job/reduce parameters", BAD_REQUEST);
+ return;
+ }
+ int reduceId;
+ String jobId;
+ try {
+ reduceId = Integer.parseInt(reduceQ.get(0));
+ jobId = jobQ.get(0);
+ } catch (NumberFormatException e) {
+ sendError(ctx, "Bad reduce parameter", BAD_REQUEST);
+ return;
+ } catch (IllegalArgumentException e) {
+ sendError(ctx, "Bad job parameter", BAD_REQUEST);
+ return;
+ }
+ final String reqUri = request.getUri();
+ if (null == reqUri) {
+ // TODO? add upstream?
+ sendError(ctx, FORBIDDEN);
+ return;
+ }
+ HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
+ try {
+ verifyRequest(jobId, ctx, request, response,
+ new URL("http", "", 8080, reqUri));
+ } catch (IOException e) {
+ LOG.warn("Shuffle failure ", e);
+ sendError(ctx, e.getMessage(), UNAUTHORIZED);
+ return;
+ }
+
+ Channel ch = evt.getChannel();
+ ch.write(response);
+ // TODO refactor the following into the pipeline
+ ChannelFuture lastMap = null;
+ for (String mapId : mapIds) {
+ try {
+ lastMap =
+ sendMapOutput(ctx, ch, userRsrc.get(jobId), jobId, mapId, reduceId);
+ if (null == lastMap) {
+ sendError(ctx, NOT_FOUND);
+ return;
+ }
+ } catch (IOException e) {
+ LOG.error("Shuffle error ", e);
+ sendError(ctx, e.getMessage(), INTERNAL_SERVER_ERROR);
+ return;
+ }
+ }
+ lastMap.addListener(metrics);
+ lastMap.addListener(ChannelFutureListener.CLOSE);
+ }
+
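+ // Handshake sketch, summarizing the checks below: the fetcher sends an
+ // HMAC of the request URL computed from the job-token secret; we verify
+ // it, then reply with a hash of the fetcher's hash so the fetcher can
+ // authenticate this server in turn.
+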
+ private void verifyRequest(String appid, ChannelHandlerContext ctx,
+ HttpRequest request, HttpResponse response, URL requestUri)
+ throws IOException {
+ SecretKey tokenSecret = secretManager.retrieveTokenSecret(appid);
+ if (null == tokenSecret) {
+ LOG.info("Request for unknown token " + appid);
+ throw new IOException("could not find jobid");
+ }
+ // string to encrypt
+ String enc_str = SecureShuffleUtils.buildMsgFrom(requestUri);
+ // hash from the fetcher
+ String urlHashStr =
+ request.getHeader(SecureShuffleUtils.HTTP_HEADER_URL_HASH);
+ if (urlHashStr == null) {
+ LOG.info("Missing header hash for " + appid);
+ throw new IOException("fetcher cannot be authenticated");
+ }
+ if (LOG.isDebugEnabled()) {
+ int len = urlHashStr.length();
+ LOG.debug("verifying request. enc_str=" + enc_str + "; hash=..." +
+ urlHashStr.substring(len-len/2, len-1));
+ }
+ // verify - throws exception
+ SecureShuffleUtils.verifyReply(urlHashStr, enc_str, tokenSecret);
+ // verification passed - encode the reply
+ String reply =
+ SecureShuffleUtils.generateHash(urlHashStr.getBytes(), tokenSecret);
+ response.setHeader(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, reply);
+ if (LOG.isDebugEnabled()) {
+ int len = reply.length();
+ LOG.debug("Fetcher request verfied. enc_str=" + enc_str + ";reply=" +
+ reply.substring(len-len/2, len-1));
+ }
+ }
+
+ protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch,
+ String user, String jobId, String mapId, int reduce)
+ throws IOException {
+ // TODO replace w/ rsrc alloc
+ // $x/$user/appcache/$appId/output/$mapId
+ // TODO: Once Shuffle is out of NM, this can use MR APIs to convert between App and Job
+ JobID jobID = JobID.forName(jobId);
+ ApplicationId appID = Records.newRecord(ApplicationId.class);
+ appID.setClusterTimestamp(Long.parseLong(jobID.getJtIdentifier()));
+ appID.setId(jobID.getId());
+ final String base =
+ ContainerLocalizer.USERCACHE + "/" + user + "/"
+ + ContainerLocalizer.APPCACHE + "/"
+ + ConverterUtils.toString(appID) + "/output" + "/" + mapId;
+ LOG.debug("DEBUG0 " + base);
+ // Index file
+ Path indexFileName = lDirAlloc.getLocalPathToRead(
+ base + "/file.out.index", conf);
+ // Map-output file
+ Path mapOutputFileName = lDirAlloc.getLocalPathToRead(
+ base + "/file.out", conf);
+ LOG.debug("DEBUG1 " + base + " : " + mapOutputFileName + " : " +
+ indexFileName);
+ IndexRecord info =
+ indexCache.getIndexInformation(mapId, reduce, indexFileName, user);
+ final ShuffleHeader header =
+ new ShuffleHeader(mapId, info.partLength, info.rawLength, reduce);
+ final DataOutputBuffer dob = new DataOutputBuffer();
+ header.write(dob);
+ ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
+ File spillfile = new File(mapOutputFileName.toString());
+ RandomAccessFile spill;
+ try {
+ spill = new RandomAccessFile(spillfile, "r");
+ } catch (FileNotFoundException e) {
+ LOG.info(spillfile + " not found");
+ return null;
+ }
+ final FileRegion partition = new DefaultFileRegion(
+ spill.getChannel(), info.startOffset, info.partLength);
+ ChannelFuture writeFuture = ch.write(partition);
+ writeFuture.addListener(new ChannelFutureListener() {
+ // TODO error handling; distinguish IO/connection failures,
+ // attribute to appropriate spill output
+ @Override
+ public void operationComplete(ChannelFuture future) {
+ partition.releaseExternalResources();
+ }
+ });
+ metrics.shuffleConnections.incr();
+ metrics.shuffleOutputBytes.incr(info.partLength); // optimistic
+ return writeFuture;
+ }
+
+ private void sendError(ChannelHandlerContext ctx,
+ HttpResponseStatus status) {
+ sendError(ctx, "", status);
+ }
+
+ private void sendError(ChannelHandlerContext ctx, String message,
+ HttpResponseStatus status) {
+ HttpResponse response = new DefaultHttpResponse(HTTP_1_1, status);
+ response.setHeader(CONTENT_TYPE, "text/plain; charset=UTF-8");
+ response.setContent(
+ ChannelBuffers.copiedBuffer(message, CharsetUtil.UTF_8));
+
+ // Close the connection as soon as the error message is sent.
+ ctx.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
+ throws Exception {
+ Channel ch = e.getChannel();
+ Throwable cause = e.getCause();
+ if (cause instanceof TooLongFrameException) {
+ sendError(ctx, BAD_REQUEST);
+ return;
+ }
+
+ LOG.error("Shuffle error: ", cause);
+ if (ch.isConnected()) {
+ LOG.error("Shuffle error " + e);
+ sendError(ctx, INTERNAL_SERVER_ERROR);
+ }
+ }
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
new file mode 100644
index 0000000..97f0c97
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import static org.apache.hadoop.test.MetricsAsserts.*;
+
+import org.jboss.netty.channel.ChannelFuture;
+
+import org.junit.Test;
+import static org.apache.hadoop.test.MockitoMaker.*;
+
+public class TestShuffleHandler {
+ static final long MiB = 1024 * 1024;
+
+ @Test public void testShuffleMetrics() throws Exception {
+ MetricsSystem ms = new MetricsSystemImpl();
+ ShuffleHandler sh = new ShuffleHandler(ms);
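+ // Stubbed future: isSuccess() returns true on the first call and false
+ // on the second, exercising both the OK and the failed metric paths.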
+ ChannelFuture cf = make(stub(ChannelFuture.class).
+ returning(true, false).from.isSuccess());
+
+ sh.metrics.shuffleConnections.incr();
+ sh.metrics.shuffleOutputBytes.incr(1*MiB);
+ sh.metrics.shuffleConnections.incr();
+ sh.metrics.shuffleOutputBytes.incr(2*MiB);
+
+ checkShuffleMetrics(ms, 3*MiB, 0, 0, 2);
+
+ sh.metrics.operationComplete(cf);
+ sh.metrics.operationComplete(cf);
+
+ checkShuffleMetrics(ms, 3*MiB, 1, 1, 0);
+ }
+
+ static void checkShuffleMetrics(MetricsSystem ms, long bytes, int failed,
+ int succeeded, int connections) {
+ MetricsSource source = ms.getSource("ShuffleMetrics");
+ MetricsRecordBuilder rb = getMetrics(source);
+ assertCounter("ShuffleOutputBytes", bytes, rb);
+ assertCounter("ShuffleOutputsFailed", failed, rb);
+ assertCounter("ShuffleOutputsOK", succeeded, rb);
+ assertGauge("ShuffleConnections", connections, rb);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-mr-client/pom.xml b/hadoop-mapreduce/hadoop-mr-client/pom.xml
new file mode 100644
index 0000000..9c67a27
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-mr-client/pom.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0"?>
+<project>
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce</artifactId>
+ <version>${hadoop-mapreduce.version}</version>
+ </parent>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client</artifactId>
+ <name>hadoop-mapreduce-client</name>
+ <packaging>pom</packaging>
+
+ <properties>
+ <mr.basedir>${project.parent.basedir}</mr.basedir>
+ </properties>
+
+
+ <dependencyManagement>
+ <dependencies>
+ <!-- begin MNG-4223 workaround -->
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn</artifactId>
+ <version>${yarn.version}</version>
+ <type>pom</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-api</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server</artifactId>
+ <version>${yarn.version}</version>
+ <type>pom</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-common</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <!-- end MNG-4223 workaround -->
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <!-- mr security depends on hdfs -->
+ <artifactId>hadoop-hdfs</artifactId>
+ <version>${hadoop-hdfs.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ <version>${yarn.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-tests</artifactId>
+ <version>${yarn.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-nodemanager</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-core</artifactId>
+ <version>${hadoop-mapreduce.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-common</artifactId>
+ <version>${hadoop-mapreduce.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-app</artifactId>
+ <version>${hadoop-mapreduce.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-app</artifactId>
+ <version>${hadoop-mapreduce.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-hs</artifactId>
+ <version>${hadoop-mapreduce.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
+ <version>${hadoop-mapreduce.version}</version>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+
+ <modules>
+ <module>hadoop-mapreduce-client-core</module>
+ <module>hadoop-mapreduce-client-common</module>
+ <module>hadoop-mapreduce-client-shuffle</module>
+ <module>hadoop-mapreduce-client-app</module>
+ <module>hadoop-mapreduce-client-jobclient</module>
+ <module>hadoop-mapreduce-client-hs</module>
+ </modules>
+</project>
diff --git a/hadoop-mapreduce/hadoop-yarn/README b/hadoop-mapreduce/hadoop-yarn/README
new file mode 100644
index 0000000..4de0646
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/README
@@ -0,0 +1,73 @@
+YARN (Yet Another Resource Negotiator, or recursively: YARN Application Resource Negotiator)
+------------------------------------------------------------------------------
+
+Requirements
+-------------
+Java: JDK 1.6
+Maven: Maven 2
+
+Setup
+-----
+Install protobuf 2.4.0a or higher (Download from http://code.google.com/p/protobuf/downloads/list)
+ - install the protoc executable (configure, make, make install)
+ - install the maven artifact (cd java; mvn install)
+Installing protoc requires gcc 4.1.x or higher.
+If the make step fails with the following error (valid until a fix is
+released for protobuf 2.4.0a):
+  ./google/protobuf/descriptor.h:1152: error:
+  `google::protobuf::internal::Mutex*google::protobuf::DescriptorPool::mutex_'
+  is private
+then replace descriptor.cc with http://protobuf.googlecode.com/svn-history/r380/trunk/src/google/protobuf/descriptor.cc
+
+
+Quick Maven Tips
+----------------
+clean workspace: mvn clean
+compile and test: mvn install
+skip tests: mvn install -DskipTests
+skip test execution but compile: mvn install -Dmaven.test.skip.exec=true
+skip native build: mvn -fn install -P-cbuild
+clean and test: mvn clean install
+run selected test after compile: mvn test -Dtest=TestClassName (combined: mvn clean install -Dtest=TestClassName)
+create runnable binaries after install: mvn assembly:assembly (combined: mvn clean install assembly:assembly)
+
+
+Eclipse Projects
+----------------
+http://maven.apache.org/guides/mini/guide-ide-eclipse.html
+
+1. Generate .project and .classpath files in all maven modules
+mvn eclipse:eclipse
+CAUTION: If the project structure has changed from your previous workspace, clean up all .project and .classpath files recursively. Then run:
+mvn eclipse:eclipse
+
+2. Import the projects in eclipse.
+
+3. Set the environment variable M2_REPO to point to your .m2/repository location.
+
+NetBeans Projects
+-----------------
+
+NetBeans has built-in support for Maven projects. Just use "Open Project..."
+and everything is set up automatically. Verified with NetBeans 6.9.1.
+
+
+Custom Hadoop Dependencies
+--------------------------
+
+By default Hadoop dependencies are specified in the top-level pom.xml
+properties section. One can override them via -Dhadoop-common.version=...
+on the command line. ~/.m2/settings.xml can also be used to specify
+these properties in different profiles, which is useful for IDEs.
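+
+For example (the version values here are illustrative):
+mvn clean install -DskipTests -Dhadoop-common.version=0.22.0-SNAPSHOT -Dhadoop-hdfs.version=0.22.0-SNAPSHOT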
+
+Modules
+-------
+YARN consists of multiple modules. The modules are listed below as per the directory structure:
+
+yarn-api - Yarn's cross-platform external interface
+
+yarn-common - Utilities which can be used by yarn clients and servers
+
+yarn-server - Implementation of the yarn-api
+ yarn-server-common - APIs shared between resourcemanager and nodemanager
+ yarn-server-nodemanager (TaskTracker replacement)
+ yarn-server-resourcemanager (JobTracker replacement)
diff --git a/hadoop-mapreduce/hadoop-yarn/bin/slaves.sh b/hadoop-mapreduce/hadoop-yarn/bin/slaves.sh
new file mode 100644
index 0000000..e32a451
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/bin/slaves.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a shell command on all slave hosts.
+#
+# Environment Variables
+#
+# YARN_SLAVES File naming remote hosts.
+# Default is ${YARN_CONF_DIR}/slaves.
+# YARN_CONF_DIR Alternate conf dir. Default is ${YARN_HOME}/conf.
+# YARN_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# YARN_SSH_OPTS Options passed to ssh when running remote commands.
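+#
+# Example (illustrative): run "uptime" on every slave with a 5-second
+# ssh connect timeout:
+#   YARN_SSH_OPTS="-o ConnectTimeout=5" bin/slaves.sh uptime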
+##
+
+usage="Usage: slaves.sh [--config confdir] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/yarn-config.sh
+
+# If the slaves file is specified in the command line,
+# then it takes precedence over the definition in
+# yarn-env.sh. Save it here.
+HOSTLIST=$YARN_SLAVES
+
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+ . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+if [ "$HOSTLIST" = "" ]; then
+ if [ "$YARN_SLAVES" = "" ]; then
+ export HOSTLIST="${YARN_CONF_DIR}/slaves"
+ else
+ export HOSTLIST="${YARN_SLAVES}"
+ fi
+fi
+
+for slave in `cat "$HOSTLIST"|sed "s/#.*$//;/^$/d"`; do
+ ssh $YARN_SSH_OPTS $slave $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$slave: /" &
+ if [ "$YARN_SLAVE_SLEEP" != "" ]; then
+ sleep $YARN_SLAVE_SLEEP
+ fi
+done
+
+wait
diff --git a/hadoop-mapreduce/hadoop-yarn/bin/start-all.sh b/hadoop-mapreduce/hadoop-yarn/bin/start-all.sh
new file mode 100644
index 0000000..43b7130
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/bin/start-all.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start all yarn daemons. Run this on master node.
+
+echo "starting yarn daemons"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/yarn-config.sh
+# start resourceManager
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager
+# start nodeManager
+"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager
+# start historyserver
+#"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start historyserver
diff --git a/hadoop-mapreduce/hadoop-yarn/bin/stop-all.sh b/hadoop-mapreduce/hadoop-yarn/bin/stop-all.sh
new file mode 100644
index 0000000..4b3fc92
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/bin/stop-all.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop all yarn daemons. Run this on master node.
+
+echo "stopping yarn daemons"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/yarn-config.sh
+# stop resourceManager
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop resourcemanager
+# stop nodeManager
+"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR stop nodemanager
+# stop historyServer
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop historyserver
+
diff --git a/hadoop-mapreduce/hadoop-yarn/bin/yarn b/hadoop-mapreduce/hadoop-yarn/bin/yarn
new file mode 100644
index 0000000..b6edcd9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/bin/yarn
@@ -0,0 +1,356 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The Hadoop command script
+#
+# Environment Variables
+#
+# JAVA_HOME The java implementation to use.
+#
+# YARN_CLASSPATH Extra Java CLASSPATH entries.
+#
+# YARN_USER_CLASSPATH_FIRST When defined, the YARN_CLASSPATH is
+# added in the beginning of the global
+# classpath. Can be defined, for example,
+# by doing
+# export YARN_USER_CLASSPATH_FIRST=true
+#
+# YARN_HEAPSIZE The maximum amount of heap to use, in MB.
+# Default is 1000.
+#
+# YARN_OPTS Extra Java runtime options.
+#
+# YARN_RESOURCEMANAGER_OPTS These options are added to YARN_OPTS
+# YARN_CLIENT_OPTS when the respective command is run.
+# YARN_{COMMAND}_OPTS etc YARN_NODEMANAGER_OPTS applies to the
+# NodeManager; YARN_CLIENT_OPTS applies to
+# more than one command (rmadmin, version,
+# jar, logs etc)
+#
+# YARN_CONF_DIR Alternate conf dir. Default is ${YARN_HOME}/conf.
+#
+# YARN_ROOT_LOGGER The root appender. Default is INFO,console
+#
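+# Example (illustrative values):
+#   export YARN_HEAPSIZE=2000
+#   export YARN_CLASSPATH=/path/to/extra.jar
+#   export YARN_ROOT_LOGGER=DEBUG,console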
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/yarn-config.sh
+
+cygwin=false
+case "`uname`" in
+CYGWIN*) cygwin=true;;
+esac
+
+# if no args specified, show usage
+if [ $# = 0 ]; then
+ echo "Usage: hadoop [--config confdir] COMMAND"
+ echo "where COMMAND is one of:"
+ echo " resourcemanager run the ResourceManager"
+ echo " nodemanager run a nodemanager on each slave"
+ echo " historyserver run job history servers as a standalone daemon"
+ echo " rmadmin admin tools"
+ echo " version print the version"
+ echo " jar <jar> run a jar file"
+ echo " logs dump container logs"
+ echo " classpath prints the class path needed to get the"
+ echo " Hadoop jar and the required libraries"
+ echo " daemonlog get/set the log level for each daemon"
+ echo " or"
+ echo " CLASSNAME run the class named CLASSNAME"
+ echo "Most commands print help when invoked w/o parameters."
+ exit 1
+fi
+
+# get arguments
+COMMAND=$1
+shift
+
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+ . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+# some Java parameters
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+ #echo "run with heapsize $YARN_HEAPSIZE"
+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+ #echo $JAVA_HEAP_MAX
+fi
+
+# CLASSPATH initially contains $HADOOP_CONF_DIR & $YARN_CONF_DIR
+CLASSPATH="${HADOOP_CONF_DIR}:${YARN_CONF_DIR}"
+if [ "$YARN_USER_CLASSPATH_FIRST" != "" ] && [ "$YARN_CLASSPATH" != "" ] ; then
+ CLASSPATH=${CLASSPATH}:${YARN_CLASSPATH}
+fi
+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+
+# for developers, add Hadoop classes to CLASSPATH
+if [ -d "$YARN_HOME/yarn-api/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-api/target/classes
+fi
+if [ -d "$YARN_HOME/yarn-common/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-common/target/classes
+fi
+if [ -d "$YARN_HOME/yarn-mapreduce/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-mapreduce/target/classes
+fi
+if [ -d "$YARN_HOME/yarn-master-worker/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-master-worker/target/classes
+fi
+if [ -d "$YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes
+fi
+if [ -d "$YARN_HOME/yarn-server/yarn-server-common/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-server/yarn-server-common/target/classes
+fi
+if [ -d "$YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes
+fi
+if [ -d "$YARN_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$YARN_HOME/build/test/classes
+fi
+if [ -d "$YARN_HOME/build/tools" ]; then
+ CLASSPATH=${CLASSPATH}:$YARN_HOME/build/tools
+fi
+
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+# add hadoop-common libs to CLASSPATH
+
+if [ -d "$HADOOP_COMMON_HOME/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build/classes
+fi
+if [ -d "$HADOOP_COMMON_HOME/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build
+fi
+if [ -d "$HADOOP_COMMON_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_COMMON_HOME/build/test/core/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build/test/core/classes
+fi
+
+for f in $HADOOP_COMMON_HOME/hadoop-*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+for f in $HADOOP_COMMON_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+for f in $HADOOP_COMMON_HOME/share/hadoop/common/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+for f in $HADOOP_COMMON_HOME/share/hadoop/common/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+for f in $HADOOP_COMMON_HOME/share/hadoop/hdfs/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+if [ -d "$HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Common/common" ]; then
+for f in $HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Common/common/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+fi
+
+if [ -d "$HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Hdfs/common" ]; then
+for f in $HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Hdfs/common/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+fi
+
+if [ -d "$HADOOP_COMMON_HOME/build/ivy/lib/Hadoop/common" ]; then
+for f in $HADOOP_COMMON_HOME/build/ivy/lib/Hadoop/common/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+fi
+
+# add hadoop-hdfs libs to CLASSPATH
+
+for f in $HADOOP_HDFS_HOME/hadoop-*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+for f in $HADOOP_HDFS_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+if [ -d "$HADOOP_HDFS_HOME/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/classes
+fi
+if [ -d "$HADOOP_HDFS_HOME/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build
+fi
+if [ -d "$HADOOP_HDFS_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_HDFS_HOME/build/tools" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/tools
+fi
+
+# add hadoop-mapred libs to CLASSPATH
+
+for f in $HADOOP_MAPRED_HOME/hadoop-*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+for f in $HADOOP_MAPRED_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+if [ -d "$HADOOP_MAPRED_HOME/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/classes
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/tools" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/tools
+fi
+
+# for releases, add core mapred jar & webapps to CLASSPATH
+if [ -d "$HADOOP_MAPRED_HOME/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME
+fi
+
+# add libs to CLASSPATH
+for f in $HADOOP_MAPRED_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+for f in $HADOOP_MAPRED_HOME/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+for f in $YARN_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add yarn libs to CLASSPATH
+for f in $YARN_HOME/modules/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add user-specified CLASSPATH last
+if [ "$YARN_USER_CLASSPATH_FIRST" = "" ] && [ "$YARN_CLASSPATH" != "" ]; then
+ CLASSPATH=${CLASSPATH}:${YARN_CLASSPATH}
+fi
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+ YARN_LOG_DIR="$YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+ YARN_LOGFILE='yarn.log'
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+# figure out which class to run
+if [ "$COMMAND" = "classpath" ] ; then
+ if $cygwin; then
+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+ fi
+ echo $CLASSPATH
+ exit
+elif [ "$COMMAND" = "rmadmin" ] ; then
+ CLASS='org.apache.hadoop.yarn.server.resourcemanager.tools.RMAdmin'
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "resourcemanager" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/rm-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
+ YARN_OPTS="$YARN_OPTS $YARN_RESOURCEMANAGER_OPTS"
+elif [ "$COMMAND" = "nodemanager" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/nm-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
+ if [[ $EUID -eq 0 ]]; then
+ YARN_OPTS="$YARN_OPTS -jvm server $YARN_NODEMANAGER_OPTS"
+ else
+ YARN_OPTS="$YARN_OPTS -server $YARN_NODEMANAGER_OPTS"
+ fi
+elif [ "$COMMAND" = "historyserver" ] ; then
+ CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
+ YARN_OPTS="$YARN_OPTS $YARN_JOB_HISTORYSERVER_OPTS"
+elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "jar" ] ; then
+ CLASS=org.apache.hadoop.util.RunJar
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "logs" ] ; then
+ CLASS=org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogDumper
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "daemonlog" ] ; then
+ CLASS=org.apache.hadoop.log.LogLevel
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+else
+ CLASS=$COMMAND
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+ YARN_HOME=`cygpath -w "$YARN_HOME"`
+ YARN_LOG_DIR=`cygpath -w "$YARN_LOG_DIR"`
+ TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
+fi
+
+# cygwin path translation
+if $cygwin; then
+ JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
+fi
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_HOME"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+
+echo "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $YARN_OPTS -classpath "$CLASSPATH" $CLASS "$@"
+exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $YARN_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/hadoop-mapreduce/hadoop-yarn/bin/yarn-config.sh b/hadoop-mapreduce/hadoop-yarn/bin/yarn-config.sh
new file mode 100644
index 0000000..87cda26
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/bin/yarn-config.sh
@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hadoop scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+
+# resolve links - $0 may be a softlink
+
+this="$0"
+while [ -h "$this" ]; do
+ ls=`ls -ld "$this"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '.*/.*' > /dev/null; then
+ this="$link"
+ else
+ this=`dirname "$this"`/"$link"
+ fi
+done
+
+# convert relative path to absolute path
+bin=`dirname "$this"`
+script=`basename "$this"`
+bin=`cd "$bin"; pwd`
+this="$bin/$script"
+
+# the root of the Hadoop installation
+export YARN_HOME=`dirname "$this"`/..
+
+#check to see if the conf dir is given as an optional argument
+if [ $# -gt 1 ]
+then
+ if [ "--config" = "$1" ]
+ then
+ shift
+ confdir=$1
+ shift
+ YARN_CONF_DIR=$confdir
+ fi
+fi
+
+# Allow alternate conf dir location.
+YARN_CONF_DIR="${YARN_CONF_DIR:-$YARN_HOME/conf}"
+
+#check to see it is specified whether to use the slaves or the
+# masters file
+if [ $# -gt 1 ]
+then
+ if [ "--hosts" = "$1" ]
+ then
+ shift
+ slavesfile=$1
+ shift
+ export YARN_SLAVES="${YARN_CONF_DIR}/$slavesfile"
+ fi
+fi
diff --git a/hadoop-mapreduce/hadoop-yarn/bin/yarn-daemon.sh b/hadoop-mapreduce/hadoop-yarn/bin/yarn-daemon.sh
new file mode 100644
index 0000000..efe6559
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/bin/yarn-daemon.sh
@@ -0,0 +1,144 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a yarn command as a daemon.
+#
+# Environment Variables
+#
+# YARN_CONF_DIR Alternate conf dir. Default is ${YARN_HOME}/conf.
+# YARN_LOG_DIR Where log files are stored. $YARN_HOME/logs by default.
+# YARN_MASTER host:path where hadoop code should be rsync'd from
+# YARN_PID_DIR Where the pid files are stored. /tmp by default.
+# YARN_IDENT_STRING A string representing this instance of hadoop. $USER by default
+# YARN_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: yarn-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) <yarn-command> "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/yarn-config.sh
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
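+# Rotate $1, keeping up to $2 (default 5) old copies, e.g.
+# yarn.log.1 -> yarn.log.2, then yarn.log -> yarn.log.1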
+hadoop_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+ . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+if [ "$YARN_IDENT_STRING" = "" ]; then
+ export YARN_IDENT_STRING="$USER"
+fi
+
+# get log directory
+if [ "$YARN_LOG_DIR" = "" ]; then
+ export YARN_LOG_DIR="$YARN_HOME/logs"
+fi
+mkdir -p "$YARN_LOG_DIR"
+chown $YARN_IDENT_STRING $YARN_LOG_DIR
+
+if [ "$YARN_PID_DIR" = "" ]; then
+ YARN_PID_DIR=/tmp
+fi
+
+# some variables
+export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
+export YARN_ROOT_LOGGER="INFO,DRFA"
+log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
+pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
+
+# Set default scheduling priority
+if [ "$YARN_NICENESS" = "" ]; then
+ export YARN_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ mkdir -p "$YARN_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ if [ "$YARN_MASTER" != "" ]; then
+ echo rsync from $YARN_MASTER
+ rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $YARN_MASTER/ "$YARN_HOME"
+ fi
+
+ hadoop_rotate_log $log
+ echo starting $command, logging to $log
+ cd "$YARN_HOME"
+ nohup nice -n $YARN_NICENESS "$YARN_HOME"/bin/yarn --config $YARN_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ echo $! > $pid
+ sleep 1; head "$log"
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo stopping $command
+ kill `cat $pid`
+ else
+ echo no $command to stop
+ fi
+ else
+ echo no $command to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
+
+
diff --git a/hadoop-mapreduce/hadoop-yarn/bin/yarn-daemons.sh b/hadoop-mapreduce/hadoop-yarn/bin/yarn-daemons.sh
new file mode 100644
index 0000000..4f89a68
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/bin/yarn-daemons.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a Yarn command on all slave hosts.
+
+usage="Usage: yarn-daemons.sh [--config confdir] [--hosts hostlistfile] [start
+|stop] command args..."
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+. $bin/yarn-config.sh
+
+exec "$bin/slaves.sh" --config $YARN_CONF_DIR cd "$YARN_HOME" \; "$bin/yarn-daemon.sh" --config $YARN_CONF_DIR "$@"
+
diff --git a/hadoop-mapreduce/hadoop-yarn/conf/slaves b/hadoop-mapreduce/hadoop-yarn/conf/slaves
new file mode 100644
index 0000000..2fbb50c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/conf/slaves
@@ -0,0 +1 @@
+localhost
diff --git a/hadoop-mapreduce/hadoop-yarn/conf/yarn-env.sh b/hadoop-mapreduce/hadoop-yarn/conf/yarn-env.sh
new file mode 100644
index 0000000..e4fb7eb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/conf/yarn-env.sh
@@ -0,0 +1,79 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Allow alternate conf dir location.
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$YARN_HOME/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+ #echo "run with heapsize $YARN_HEAPSIZE"
+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+ #echo $JAVA_HEAP_MAX
+fi
+
+# CLASSPATH initially contains $YARN_CONF_DIR
+CLASSPATH="${YARN_CONF_DIR}"
+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+ YARN_LOG_DIR="$YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+ YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+ YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+
+
diff --git a/hadoop-mapreduce/hadoop-yarn/conf/yarn-site.xml b/hadoop-mapreduce/hadoop-yarn/conf/yarn-site.xml
new file mode 100644
index 0000000..d14deea
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/conf/yarn-site.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0"?>
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+
+</configuration>
diff --git a/hadoop-mapreduce/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-mapreduce/hadoop-yarn/dev-support/findbugs-exclude.xml
new file mode 100644
index 0000000..03853ad
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -0,0 +1,183 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<FindBugsFilter>
+ <!-- Ignore PB Generated Code -->
+ <Match>
+ <Package name="org.apache.hadoop.yarn.proto" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.ipc\.RpcProtos.*" />
+ </Match>
+
+ <!-- Ignore unchecked Event casts -->
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.server\.nodemanager\.containermanager\.application\.ApplicationImpl.*" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.server\.nodemanager\.containermanager\.container\.ContainerImpl.*" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.server\.nodemanager\.containermanager\.localizer\.LocalizedResource.*" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.server\.nodemanager\.containermanager\.localizer\.ResourceLocalizationService.*" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.rmapp\.attempt\.RMAppAttemptImpl.*" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl$AppRejectedTransition" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.rmcontainer\.RMContainerImpl.*" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.rmnode\.RMNodeImpl.*" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.scheduler\.capacity\.CapacityScheduler.*" />
+ <Method name="handle" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.scheduler\.fifo\.FifoScheduler.*" />
+ <Method name="handle" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
+
+ <!-- Ignore intentional switch fallthroughs -->
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl" />
+ <Method name="finished" />
+ <Bug pattern="SF_SWITCH_FALLTHROUGH" />
+ </Match>
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer" />
+ <Method name="localizeFiles" />
+ <Bug pattern="SF_SWITCH_FALLTHROUGH" />
+ </Match>
+
+ <!-- Ignore some irrelevant serialization warnings -->
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceRetentionSet$LRUComparator" />
+ <Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
+ </Match>
+ <Match>
+ <Class name="org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl" />
+ <Field name="builder" />
+ <Bug pattern="SE_BAD_FIELD" />
+ </Match>
+ <Match>
+ <Class name="~org\.apache\.hadoop\.yarn\.util\.BuilderUtils.*" />
+ <Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
+ </Match>
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.resourcemanager.resource.Priority$Comparator" />
+ <Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
+ </Match>
+
+ <!-- Inconsistent sync warning - only start() is synchronized-->
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogAggregationService" />
+ <Bug pattern="IS2_INCONSISTENT_SYNC" />
+ </Match>
+ <!-- Inconsistent sync warning - reinitialize read from other queue does not need sync-->
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue" />
+ <Or>
+ <Field name="absoluteCapacity" />
+ <Field name="absoluteMaxCapacity" />
+ <Field name="acls" />
+ <Field name="capacity" />
+ <Field name="maxApplications" />
+ <Field name="maxApplicationsPerUser" />
+ <Field name="maximumCapacity" />
+ <Field name="state" />
+ <Field name="userLimit" />
+ <Field name="userLimitFactor" />
+ </Or>
+ <Bug pattern="IS2_INCONSISTENT_SYNC" />
+ </Match>
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue" />
+ <Or>
+ <Field name="absoluteCapacity" />
+ <Field name="absoluteMaxCapacity" />
+ <Field name="acls" />
+ <Field name="capacity" />
+ <Field name="maximumCapacity" />
+ <Field name="state" />
+ </Or>
+ <Bug pattern="IS2_INCONSISTENT_SYNC" />
+ </Match>
+
+ <!-- Don't care if putIfAbsent value is ignored -->
+ <Match>
+ <Package name="org.apache.hadoop.yarn.factories.impl.pb" />
+ <Bug pattern="RV_RETURN_VALUE_OF_PUTIFABSENT_IGNORED" />
+ </Match>
+
+ <!-- Intended System.exit calls -->
+ <Match>
+ <Class name="org.apache.hadoop.yarn.webapp.Dispatcher$1" />
+ <Bug pattern="DM_EXIT" />
+ </Match>
+ <Match>
+ <Class name="org.apache.hadoop.yarn.webapp.WebApps$Builder" />
+ <Bug pattern="DM_EXIT" />
+ </Match>
+ <!-- AsyncDispatcher will kill the process if there is an error dispatching -->
+ <Match>
+ <Class name="org.apache.hadoop.yarn.event.AsyncDispatcher" />
+ <Method name="dispatch" />
+ <Bug pattern="DM_EXIT" />
+ </Match>
+
+ <!-- Ignore heartbeat exception when killing localizer -->
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer" />
+ <Method name="localizeFiles" />
+ <Bug pattern="DE_MIGHT_IGNORE" />
+ </Match>
+
+ <!-- Ignore EI_EXPOSE_REP2 in Log services -->
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat$LogValue" />
+ <Bug pattern="EI_EXPOSE_REP2" />
+ </Match>
+ <Match>
+ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AppLogAggregatorImpl" />
+ <Bug pattern="EI_EXPOSE_REP2" />
+ </Match>
+</FindBugsFilter>
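The bulk of these exclusions suppress BC_UNCONFIRMED_CAST in the state-machine
event handlers, which check an event's type enum before downcasting — a pattern
FindBugs cannot verify statically. A minimal sketch of the shape being
suppressed, with entirely hypothetical names:

    enum EventType { CONTAINER_LAUNCHED, CONTAINER_DONE }

    class Event {
      EventType type;
      EventType getType() { return type; }
    }

    class ContainerDoneEvent extends Event { int exitStatus; }

    class Handler {
      void handle(Event event) {
        if (event.getType() == EventType.CONTAINER_DONE) {
          // BC_UNCONFIRMED_CAST fires here even though the enum check makes it safe.
          ContainerDoneEvent done = (ContainerDoneEvent) event;
        }
      }
    }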
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/pom.xml
new file mode 100644
index 0000000..16d59b2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -0,0 +1,85 @@
+<?xml version="1.0"?>
+<project xmlns:pom="http://maven.apache.org/POM/4.0.0">
+ <parent>
+ <artifactId>hadoop-yarn</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${yarn.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-api</artifactId>
+ <name>hadoop-yarn-api</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <yarn.basedir>${project.parent.basedir}</yarn.basedir>
+ </properties>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-protobuf-generated-sources-directory</id>
+ <phase>initialize</phase>
+ <configuration>
+ <target>
+ <mkdir dir="target/generated-sources/proto" />
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>generate-sources</id>
+ <phase>generate-sources</phase>
+ <configuration>
+ <executable>protoc</executable>
+ <arguments>
+ <argument>-Isrc/main/proto/</argument>
+ <argument>--java_out=target/generated-sources/proto</argument>
+ <argument>src/main/proto/yarn_protos.proto</argument>
+ <argument>src/main/proto/yarn_service_protos.proto</argument>
+ <argument>src/main/proto/AM_RM_protocol.proto</argument>
+ <argument>src/main/proto/client_RM_protocol.proto</argument>
+ <argument>src/main/proto/container_manager.proto</argument>
+ </arguments>
+ </configuration>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>target/generated-sources/proto</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/AMRMProtocol.genavro b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/AMRMProtocol.genavro
new file mode 100644
index 0000000..d36922e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/AMRMProtocol.genavro
@@ -0,0 +1,27 @@
+@namespace("org.apache.hadoop.yarn")
+protocol AMRMProtocol {
+
+ import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro";
+
+ // Scheduler
+ record Priority {
+ int priority;
+ }
+
+ record ResourceRequest {
+ Priority priority;
+ string hostName;
+ Resource capability;
+ int numContainers;
+ }
+ record AMResponse {
+ boolean reboot;
+ int responseId;
+ array<Container> containers;
+ }
+
+ void registerApplicationMaster(ApplicationMaster applicationMaster) throws YarnRemoteException;
+ void finishApplicationMaster(ApplicationMaster applicationMaster) throws YarnRemoteException;
+ AMResponse allocate(ApplicationStatus status, array<ResourceRequest> ask, array<Container> release) throws YarnRemoteException;
+
+ }
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/ClientRMProtocol.genavro b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/ClientRMProtocol.genavro
new file mode 100644
index 0000000..a37fc03
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/ClientRMProtocol.genavro
@@ -0,0 +1,45 @@
+@namespace("org.apache.hadoop.yarn")
+protocol ClientRMProtocol {
+
+ import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro";
+
+ record Priority {
+ int priority;
+ }
+
+ record ApplicationSubmissionContext {
+ ApplicationID applicationId;
+ union {null, string} applicationName;
+ Resource masterCapability; // TODO: Needs RM validation
+
+ //all the files required by the container to run the ApplicationMaster
+ //KEY-> destination dir name
+ //VALUE-> source path
+ map<URL> resources;
+ union {null, map<LocalResource>} resources_todo;
+
+ // TODO - Remove fsTokens (url encoded)
+ union {null, array<string>} fsTokens;
+ union {null, bytes} fsTokens_todo;
+
+ //env to be set before launching the command for ApplicationMaster
+ //KEY-> env variable name
+ //VALUE -> env variable value.
+ map<string> environment;
+ //command-line of the container that is going to launch the ApplicationMaster.
+ array<string> command;
+ union {null, string} queue;
+ union {null, Priority} priority;
+ string user; // TODO: Shouldn't pass it like this.
+ }
+
+ record YarnClusterMetrics {
+ int numNodeManagers;
+ }
+
+ ApplicationID getNewApplicationId() throws YarnRemoteException;
+ ApplicationMaster getApplicationMaster(ApplicationID applicationId) throws YarnRemoteException;
+ void submitApplication(ApplicationSubmissionContext context) throws YarnRemoteException;
+ void finishApplication(ApplicationID applicationId) throws YarnRemoteException;
+ YarnClusterMetrics getClusterMetrics() throws YarnRemoteException;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/ContainerManager.genavro b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/ContainerManager.genavro
new file mode 100644
index 0000000..0ba1fb8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/ContainerManager.genavro
@@ -0,0 +1,37 @@
+@namespace("org.apache.hadoop.yarn")
+protocol ContainerManager {
+
+ import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro";
+
+ record ContainerLaunchContext {
+ ContainerID id;
+ string user; // TODO: Shouldn't pass it like this.
+ Resource resource; // TODO: Needs RM validation
+ union {null, map<LocalResource>} resources;
+
+ union {null, bytes} containerTokens; // FileSystem related and other application specific tokens.
+ union {null, map<bytes>} serviceData;
+
+ //env to be set before launching the command
+ //KEY-> env variable name
+ //VALUE -> env variable value.
+ map<string> env;
+
+ //commandline to launch the container. All resources are downloaded in the
+ //working directory of the command.
+ array<string> command;
+ }
+
+ record ContainerStatus {
+ ContainerID containerID;
+ ContainerState state;
+ int exitStatus;
+ }
+
+ void startContainer(ContainerLaunchContext container) throws YarnRemoteException;
+ void stopContainer(ContainerID containerID) throws YarnRemoteException;
+ void cleanupContainer(ContainerID containerID) throws YarnRemoteException;
+
+ ContainerStatus getContainerStatus(ContainerID containerID) throws YarnRemoteException;
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/yarn-types.genavro b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/yarn-types.genavro
new file mode 100644
index 0000000..51d2077
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/avro/yarn-types.genavro
@@ -0,0 +1,109 @@
+@namespace("org.apache.hadoop.yarn")
+protocol types {
+
+ record ApplicationID {
+ int id;
+ long clusterTimeStamp;
+ }
+
+ record ContainerID {
+ ApplicationID appID; // the application id to which this container belongs.
+ int id; // unique id for this container within the application
+ }
+
+ error YarnRemoteException {
+ union { null, string } message;
+ union { null, string } trace; //stackTrace
+ union { null, YarnRemoteException } cause;
+ }
+
+ record Resource {
+ int memory;
+ //int diskspace;
+ }
+
+ // State of the container on the ContainerManager.
+ enum ContainerState {
+ INITIALIZING,
+ RUNNING,
+ COMPLETE
+ }
+
+ record ContainerToken {
+ bytes identifier;
+ bytes password;
+ string kind;
+ string service;
+ }
+
+ record Container {
+ ContainerID id;
+ string hostName;
+ Resource resource;
+ ContainerState state;
+ union {ContainerToken, null} containerToken;
+ }
+
+ enum ApplicationState {
+ PENDING,
+ ALLOCATING,
+ ALLOCATED,
+ EXPIRED_PENDING,
+ LAUNCHING,
+ LAUNCHED,
+ RUNNING,
+ PAUSED,
+ CLEANUP,
+ COMPLETED,
+ KILLED,
+ FAILED
+ }
+
+ record ApplicationStatus {
+ int responseID; // TODO: This should be renamed as previousResponseID
+ ApplicationID applicationId;
+ float progress;
+ long lastSeen;
+ }
+
+ record ApplicationMaster {
+ ApplicationID applicationId;
+ union { null, string } host;
+ int rpcPort;
+ int httpPort;
+ ApplicationStatus status;
+ ApplicationState state;
+ union { null, string } clientToken;
+ }
+
+ record URL {
+ string scheme;
+ union { null, string } host;
+ int port;
+ string file;
+ }
+
+ enum LocalResourceVisibility {
+ // accessible to applications from all users
+ PUBLIC,
+ // accessible only to applications from the submitting user
+ PRIVATE,
+ // accessible only to this application
+ APPLICATION
+ }
+
+ enum LocalResourceType {
+ // an archive to be expanded
+ ARCHIVE,
+ // uninterpreted file
+ FILE
+ }
+
+ record LocalResource {
+ URL resource;
+ long size;
+ long timestamp;
+ LocalResourceType type;
+ LocalResourceVisibility state;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java
new file mode 100644
index 0000000..bb09943
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java
@@ -0,0 +1,15 @@
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+
+public interface AMRMProtocol {
+ public RegisterApplicationMasterResponse registerApplicationMaster(RegisterApplicationMasterRequest request) throws YarnRemoteException;
+ public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws YarnRemoteException;
+ public AllocateResponse allocate(AllocateRequest request) throws YarnRemoteException;
+}
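A hedged sketch of an ApplicationMaster's use of this interface: register,
heartbeat via allocate, then deregister. newRecord(...), done() and progress()
are hypothetical stand-ins (the concrete PB record implementations appear later
in this patch), and the field values are illustrative only:

    abstract class AmLifecycleSketch {
      abstract <T> T newRecord(Class<T> clazz); // stand-in for the record factory
      abstract boolean done();
      abstract float progress();

      void run(AMRMProtocol scheduler, ApplicationAttemptId attemptId)
          throws YarnRemoteException {
        RegisterApplicationMasterRequest register =
            newRecord(RegisterApplicationMasterRequest.class);
        register.setApplicationAttemptId(attemptId);
        register.setHost("am-host.example.com");          // illustrative values
        register.setRpcPort(4242);
        register.setTrackingUrl("http://am-host.example.com:8080");
        scheduler.registerApplicationMaster(register);

        while (!done()) {                                 // heartbeat loop
          AllocateRequest heartbeat = newRecord(AllocateRequest.class);
          heartbeat.setApplicationAttemptId(attemptId);
          heartbeat.setProgress(progress());
          AllocateResponse response = scheduler.allocate(heartbeat);
          // newly granted and completed containers come back in the AMResponse
        }

        FinishApplicationMasterRequest finish =
            newRecord(FinishApplicationMasterRequest.class);
        finish.setAppAttemptId(attemptId);
        finish.setFinalState("SUCCEEDED");
        scheduler.finishApplicationMaster(finish);
      }
    }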
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
new file mode 100644
index 0000000..212ca67
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -0,0 +1,49 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * This is the API for applications, comprising the constants that YARN sets
+ * up for applications and their containers.
+ *
+ * TODO: Should also be defined in avro/pb IDLs
+ * TODO: Investigate the semantics and security of each cross-boundary ref.
+ */
+public interface ApplicationConstants {
+
+ // TODO: They say passing tokens via the env isn't good.
+ public static final String APPLICATION_MASTER_TOKEN_ENV_NAME =
+ "AppMasterTokenEnv";
+
+ // TODO: They say passing tokens via the env isn't good.
+ public static final String APPLICATION_CLIENT_SECRET_ENV_NAME =
+ "AppClientTokenEnv";
+
+ // TODO: Weird. This is part of the AM command line; it should be an env var instead.
+ public static final String AM_FAIL_COUNT_STRING = "<FAILCOUNT>";
+
+ public static final String CONTAINER_TOKEN_FILE_ENV_NAME =
+ UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION;
+
+ public static final String LOCAL_DIR_ENV = "YARN_LOCAL_DIRS";
+
+ public static final String LOG_DIR_EXPANSION_VAR = "<LOG_DIR>";
+}
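A launched container would consume these constants from its environment. The
sketch below is purely illustrative and assumes the NodeManager exports
YARN_LOCAL_DIRS and the common Hadoop token-file variable for every container:

    public class ContainerEnvSketch {
      public static void main(String[] args) {
        // Directories the NodeManager localized for this container (assumption).
        String localDirs = System.getenv(ApplicationConstants.LOCAL_DIR_ENV);
        // Token file location, via the shared Hadoop env variable.
        String tokenFile =
            System.getenv(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME);
        System.out.println("local dirs: " + localDirs + ", token file: " + tokenFile);
      }
    }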
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
new file mode 100644
index 0000000..d1d3345c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
@@ -0,0 +1,33 @@
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+
+public interface ClientRMProtocol {
+ public GetNewApplicationIdResponse getNewApplicationId(GetNewApplicationIdRequest request) throws YarnRemoteException;
+ public GetApplicationReportResponse getApplicationReport(GetApplicationReportRequest request) throws YarnRemoteException;
+ public SubmitApplicationResponse submitApplication(SubmitApplicationRequest request) throws YarnRemoteException;
+ public FinishApplicationResponse finishApplication(FinishApplicationRequest request) throws YarnRemoteException;
+ public GetClusterMetricsResponse getClusterMetrics(GetClusterMetricsRequest request) throws YarnRemoteException;
+ public GetAllApplicationsResponse getAllApplications(GetAllApplicationsRequest request) throws YarnRemoteException;
+ public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request) throws YarnRemoteException;
+ public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) throws YarnRemoteException;
+ public GetQueueUserAclsInfoResponse getQueueUserAcls(GetQueueUserAclsInfoRequest request) throws YarnRemoteException;
+}
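A hedged sketch of the client-side submission flow against this interface.
newRecord(...) is again a hypothetical factory stand-in, and the submission
context is assumed to be built elsewhere with an applicationId setter:

    abstract class ClientSketch {
      abstract <T> T newRecord(Class<T> clazz);

      ApplicationReport submitAndReport(ClientRMProtocol rm,
          ApplicationSubmissionContext context) throws YarnRemoteException {
        // 1. Ask the RM for a fresh application id.
        ApplicationId appId =
            rm.getNewApplicationId(newRecord(GetNewApplicationIdRequest.class))
              .getApplicationId();
        context.setApplicationId(appId);                  // assumed setter

        // 2. Submit the application.
        SubmitApplicationRequest submit = newRecord(SubmitApplicationRequest.class);
        submit.setApplicationSubmissionContext(context);
        rm.submitApplication(submit);

        // 3. Poll for a report.
        GetApplicationReportRequest query =
            newRecord(GetApplicationReportRequest.class);
        query.setApplicationId(appId);
        return rm.getApplicationReport(query).getApplicationReport();
      }
    }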
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java
new file mode 100644
index 0000000..a778793
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java
@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+
+public interface ContainerManager {
+ StartContainerResponse startContainer(StartContainerRequest request)
+ throws YarnRemoteException;
+
+ StopContainerResponse stopContainer(StopContainerRequest request)
+ throws YarnRemoteException;
+
+ GetContainerStatusResponse getContainerStatus(
+ GetContainerStatusRequest request) throws YarnRemoteException;
+}
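An illustrative start/status/stop sequence against the interface above; the
launch context construction is elided and newRecord(...) remains a hypothetical
factory stand-in:

    abstract class ContainerSketch {
      abstract <T> T newRecord(Class<T> clazz);

      void runContainer(ContainerManager nm, ContainerLaunchContext launchContext,
          ContainerId containerId) throws YarnRemoteException {
        StartContainerRequest start = newRecord(StartContainerRequest.class);
        start.setContainerLaunchContext(launchContext);
        nm.startContainer(start);

        GetContainerStatusRequest query = newRecord(GetContainerStatusRequest.class);
        query.setContainerId(containerId);
        ContainerStatus status = nm.getContainerStatus(query).getStatus();

        StopContainerRequest stop = newRecord(StopContainerRequest.class);
        stop.setContainerId(containerId);
        nm.stopContainer(stop);
      }
    }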
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
new file mode 100644
index 0000000..581e048
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
@@ -0,0 +1,37 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+
+public interface AllocateRequest {
+
+ ApplicationAttemptId getApplicationAttemptId();
+ void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId);
+
+ int getResponseId();
+ void setResponseId(int id);
+
+ float getProgress();
+ void setProgress(float progress);
+
+ List<ResourceRequest> getAskList();
+ ResourceRequest getAsk(int index);
+ int getAskCount();
+
+ List<ContainerId> getReleaseList();
+ ContainerId getRelease(int index);
+ int getReleaseCount();
+
+ void addAllAsks(List<ResourceRequest> resourceRequest);
+ void addAsk(ResourceRequest request);
+ void removeAsk(int index);
+ void clearAsks();
+
+ void addAllReleases(List<ContainerId> releaseContainers);
+ void addRelease(ContainerId container);
+ void removeRelease(int index);
+ void clearReleases();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
new file mode 100644
index 0000000..60eccf5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.AMResponse;
+
+public interface AllocateResponse {
+ public abstract AMResponse getAMResponse();
+
+ public abstract void setAMResponse(AMResponse amResponse);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java
new file mode 100644
index 0000000..c08f6f0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java
@@ -0,0 +1,20 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+
+
+public interface FinishApplicationMasterRequest {
+
+ ApplicationAttemptId getApplicationAttemptId();
+ void setAppAttemptId(ApplicationAttemptId applicationAttemptId);
+
+ String getFinalState();
+ void setFinalState(String finalState);
+
+ String getDiagnostics();
+ void setDiagnostics(String diagnostics);
+
+ String getTrackingUrl();
+ void setTrackingUrl(String trackingUrl);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterResponse.java
new file mode 100644
index 0000000..bd2c6cf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterResponse.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface FinishApplicationMasterResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java
new file mode 100644
index 0000000..bc95c6e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public interface FinishApplicationRequest {
+ public abstract ApplicationId getApplicationId();
+
+ public abstract void setApplicationId(ApplicationId applicationId);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java
new file mode 100644
index 0000000..a52a146
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface FinishApplicationResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsRequest.java
new file mode 100644
index 0000000..ea6646e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsRequest.java
@@ -0,0 +1,4 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface GetAllApplicationsRequest {
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsResponse.java
new file mode 100644
index 0000000..cc64d28
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsResponse.java
@@ -0,0 +1,10 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+
+public interface GetAllApplicationsResponse {
+ List<ApplicationReport> getApplicationList();
+ void setApplicationList(List<ApplicationReport> applications);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportRequest.java
new file mode 100644
index 0000000..20e8286
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportRequest.java
@@ -0,0 +1,8 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public interface GetApplicationReportRequest {
+ public abstract ApplicationId getApplicationId();
+ public abstract void setApplicationId(ApplicationId applicationId);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportResponse.java
new file mode 100644
index 0000000..579b8d0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportResponse.java
@@ -0,0 +1,8 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+
+public interface GetApplicationReportResponse {
+ public abstract ApplicationReport getApplicationReport();
+ public abstract void setApplicationReport(ApplicationReport applicationReport);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsRequest.java
new file mode 100644
index 0000000..3da48ed
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsRequest.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface GetClusterMetricsRequest {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsResponse.java
new file mode 100644
index 0000000..d2c98b6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsResponse.java
@@ -0,0 +1,8 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
+
+public interface GetClusterMetricsResponse {
+ public abstract YarnClusterMetrics getClusterMetrics();
+ public abstract void setClusterMetrics(YarnClusterMetrics metrics);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesRequest.java
new file mode 100644
index 0000000..e9de062
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesRequest.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface GetClusterNodesRequest {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesResponse.java
new file mode 100644
index 0000000..16ef6ca
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesResponse.java
@@ -0,0 +1,10 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.NodeReport;
+
+public interface GetClusterNodesResponse {
+ List<NodeReport> getNodeReports();
+ void setNodeReports(List<NodeReport> nodeReports);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusRequest.java
new file mode 100644
index 0000000..c45a712
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusRequest.java
@@ -0,0 +1,8 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public interface GetContainerStatusRequest {
+ public abstract ContainerId getContainerId();
+ public abstract void setContainerId(ContainerId containerId);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusResponse.java
new file mode 100644
index 0000000..d26b11e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusResponse.java
@@ -0,0 +1,8 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+
+public interface GetContainerStatusResponse {
+ public abstract ContainerStatus getStatus();
+ public abstract void setStatus(ContainerStatus containerStatus);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java
new file mode 100644
index 0000000..690eecd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface GetNewApplicationIdRequest {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdResponse.java
new file mode 100644
index 0000000..dc17d4f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdResponse.java
@@ -0,0 +1,8 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public interface GetNewApplicationIdResponse {
+ public abstract ApplicationId getApplicationId();
+ public abstract void setApplicationId(ApplicationId applicationId);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoRequest.java
new file mode 100644
index 0000000..b218581
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoRequest.java
@@ -0,0 +1,16 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface GetQueueInfoRequest {
+ String getQueueName();
+ void setQueueName(String queueName);
+
+ boolean getIncludeApplications();
+ void setIncludeApplications(boolean includeApplications);
+
+ boolean getIncludeChildQueues();
+ void setIncludeChildQueues(boolean includeChildQueues);
+
+ boolean getRecursive();
+ void setRecursive(boolean recursive);
+}
+
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoResponse.java
new file mode 100644
index 0000000..eef2608
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoResponse.java
@@ -0,0 +1,8 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+
+public interface GetQueueInfoResponse {
+ QueueInfo getQueueInfo();
+ void setQueueInfo(QueueInfo queueInfo);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoRequest.java
new file mode 100644
index 0000000..be03a8b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoRequest.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface GetQueueUserAclsInfoRequest {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoResponse.java
new file mode 100644
index 0000000..4942fec
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoResponse.java
@@ -0,0 +1,13 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+
+public interface GetQueueUserAclsInfoResponse {
+
+ public List<QueueUserACLInfo> getUserAclsInfoList();
+
+ public void setUserAclsInfoList(List<QueueUserACLInfo> queueUserAclsList);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
new file mode 100644
index 0000000..ef8e694
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+
+public interface RegisterApplicationMasterRequest {
+
+ ApplicationAttemptId getApplicationAttemptId();
+ void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId);
+
+ String getHost();
+ void setHost(String host);
+
+ int getRpcPort();
+ void setRpcPort(int port);
+
+ String getTrackingUrl();
+ void setTrackingUrl(String trackingUrl);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
new file mode 100644
index 0000000..1660388
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
@@ -0,0 +1,10 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+
+public interface RegisterApplicationMasterResponse {
+ public Resource getMinimumResourceCapability();
+ public void setMinimumResourceCapability(Resource capability);
+ public Resource getMaximumResourceCapability();
+ public void setMaximumResourceCapability(Resource capability);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerRequest.java
new file mode 100644
index 0000000..ceb10bb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerRequest.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+
+public interface StartContainerRequest {
+ public abstract ContainerLaunchContext getContainerLaunchContext();
+
+ public abstract void setContainerLaunchContext(ContainerLaunchContext context);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java
new file mode 100644
index 0000000..4b37018
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface StartContainerResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerRequest.java
new file mode 100644
index 0000000..0debb28
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerRequest.java
@@ -0,0 +1,26 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public interface StopContainerRequest {
+ ContainerId getContainerId();
+ void setContainerId(ContainerId containerId);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerResponse.java
new file mode 100644
index 0000000..ffbdd8a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerResponse.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface StopContainerResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationRequest.java
new file mode 100644
index 0000000..fe4e447
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationRequest.java
@@ -0,0 +1,8 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+
+public interface SubmitApplicationRequest {
+ public abstract ApplicationSubmissionContext getApplicationSubmissionContext();
+ public abstract void setApplicationSubmissionContext(ApplicationSubmissionContext context);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationResponse.java
new file mode 100644
index 0000000..fba4f28
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationResponse.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+public interface SubmitApplicationResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
new file mode 100644
index 0000000..de5c494
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
@@ -0,0 +1,317 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProtoOrBuilder;
+
+
+
+public class AllocateRequestPBImpl extends ProtoBase<AllocateRequestProto> implements AllocateRequest {
+ AllocateRequestProto proto = AllocateRequestProto.getDefaultInstance();
+ AllocateRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationAttemptId applicationAttemptID = null;
+ private List<ResourceRequest> ask = null;
+ private List<ContainerId> release = null;
+
+
+ public AllocateRequestPBImpl() {
+ builder = AllocateRequestProto.newBuilder();
+ }
+
+ public AllocateRequestPBImpl(AllocateRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public AllocateRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.applicationAttemptID != null) {
+ builder.setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptID));
+ }
+ if (this.ask != null) {
+ addAsksToProto();
+ }
+ if (this.release != null) {
+ addReleasesToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = AllocateRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public ApplicationAttemptId getApplicationAttemptId() {
+ AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationAttemptID != null) {
+ return this.applicationAttemptID;
+ }
+ if (!p.hasApplicationAttemptId()) {
+ return null;
+ }
+ this.applicationAttemptID = convertFromProtoFormat(p.getApplicationAttemptId());
+ return this.applicationAttemptID;
+ }
+
+ @Override
+ public void setApplicationAttemptId(ApplicationAttemptId appAttemptId) {
+ maybeInitBuilder();
+ if (appAttemptId == null)
+ builder.clearApplicationAttemptId();
+ this.applicationAttemptID = appAttemptId;
+ }
+
+ @Override
+ public int getResponseId() {
+ AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getResponseId();
+ }
+
+ @Override
+ public void setResponseId(int id) {
+ maybeInitBuilder();
+ builder.setResponseId(id);
+ }
+
+ @Override
+ public float getProgress() {
+ AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getProgress();
+ }
+
+ @Override
+ public void setProgress(float progress) {
+ maybeInitBuilder();
+ builder.setProgress(progress);
+ }
+
+ @Override
+ public List<ResourceRequest> getAskList() {
+ initAsks();
+ return this.ask;
+ }
+ @Override
+ public ResourceRequest getAsk(int index) {
+ initAsks();
+ return this.ask.get(index);
+ }
+ @Override
+ public int getAskCount() {
+ initAsks();
+ return this.ask.size();
+ }
+
+ private void initAsks() {
+ if (this.ask != null) {
+ return;
+ }
+ AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
+ List<ResourceRequestProto> list = p.getAskList();
+ this.ask = new ArrayList<ResourceRequest>();
+
+ for (ResourceRequestProto c : list) {
+ this.ask.add(convertFromProtoFormat(c));
+ }
+ }
+
+ @Override
+ public void addAllAsks(final List<ResourceRequest> ask) {
+ if (ask == null)
+ return;
+ initAsks();
+ this.ask.addAll(ask);
+ }
+
+ private void addAsksToProto() {
+ maybeInitBuilder();
+ builder.clearAsk();
+ if (ask == null)
+ return;
+ Iterable<ResourceRequestProto> iterable = new Iterable<ResourceRequestProto>() {
+ @Override
+ public Iterator<ResourceRequestProto> iterator() {
+ return new Iterator<ResourceRequestProto>() {
+
+ Iterator<ResourceRequest> iter = ask.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public ResourceRequestProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllAsk(iterable);
+ }
+ @Override
+ public void addAsk(ResourceRequest ask) {
+ initAsks();
+ this.ask.add(ask);
+ }
+ @Override
+ public void removeAsk(int index) {
+ initAsks();
+ this.ask.remove(index);
+ }
+ @Override
+ public void clearAsks() {
+ initAsks();
+ this.ask.clear();
+ }
+ @Override
+ public List<ContainerId> getReleaseList() {
+ initReleases();
+ return this.release;
+ }
+ @Override
+ public ContainerId getRelease(int index) {
+ initReleases();
+ return this.release.get(index);
+ }
+ @Override
+ public int getReleaseCount() {
+ initReleases();
+ return this.release.size();
+ }
+
+ private void initReleases() {
+ if (this.release != null) {
+ return;
+ }
+ AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
+ List<ContainerIdProto> list = p.getReleaseList();
+ this.release = new ArrayList<ContainerId>();
+
+ for (ContainerIdProto c : list) {
+ this.release.add(convertFromProtoFormat(c));
+ }
+ }
+
+ @Override
+ public void addAllReleases(final List<ContainerId> release) {
+ if (release == null)
+ return;
+ initReleases();
+ this.release.addAll(release);
+ }
+
+ private void addReleasesToProto() {
+ maybeInitBuilder();
+ builder.clearRelease();
+ if (release == null)
+ return;
+ Iterable<ContainerIdProto> iterable = new Iterable<ContainerIdProto>() {
+ @Override
+ public Iterator<ContainerIdProto> iterator() {
+ return new Iterator<ContainerIdProto>() {
+
+ Iterator<ContainerId> iter = release.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public ContainerIdProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllRelease(iterable);
+ }
+ @Override
+ public void addRelease(ContainerId release) {
+ initReleases();
+ this.release.add(release);
+ }
+ @Override
+ public void removeRelease(int index) {
+ initReleases();
+ this.release.remove(index);
+ }
+ @Override
+ public void clearReleases() {
+ initReleases();
+ this.release.clear();
+ }
+
+ private ApplicationAttemptIdPBImpl convertFromProtoFormat(ApplicationAttemptIdProto p) {
+ return new ApplicationAttemptIdPBImpl(p);
+ }
+
+ private ApplicationAttemptIdProto convertToProtoFormat(ApplicationAttemptId t) {
+ return ((ApplicationAttemptIdPBImpl)t).getProto();
+ }
+
+ private ResourceRequestPBImpl convertFromProtoFormat(ResourceRequestProto p) {
+ return new ResourceRequestPBImpl(p);
+ }
+
+ private ResourceRequestProto convertToProtoFormat(ResourceRequest t) {
+ return ((ResourceRequestPBImpl)t).getProto();
+ }
+
+ private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+ return new ContainerIdPBImpl(p);
+ }
+
+ private ContainerIdProto convertToProtoFormat(ContainerId t) {
+ return ((ContainerIdPBImpl)t).getProto();
+ }
+}
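The PBImpl classes all follow the same copy-on-write pattern visible above:
setters populate either local fields or the builder, and getProto() merges any
locally cached state (here the ask/release lists and the attempt id) into the
builder before flipping the object to proto-backed mode. A short usage sketch;
attemptId is assumed to be a PB-backed ApplicationAttemptId obtained elsewhere:

    AllocateRequestPBImpl request = new AllocateRequestPBImpl();
    request.setApplicationAttemptId(attemptId);
    request.setResponseId(7);
    request.setProgress(0.5f);
    // Merges local state and returns the serializable protobuf message.
    AllocateRequestProto wire = request.getProto();
    // The proto can be rewrapped on the receiving side.
    AllocateRequest roundTrip = new AllocateRequestPBImpl(wire);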
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
new file mode 100644
index 0000000..54cbf31
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.AMResponsePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.AMResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProtoOrBuilder;
+
+
+
+public class AllocateResponsePBImpl extends ProtoBase<AllocateResponseProto> implements AllocateResponse {
+ AllocateResponseProto proto = AllocateResponseProto.getDefaultInstance();
+ AllocateResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private AMResponse amResponse;
+
+
+ public AllocateResponsePBImpl() {
+ builder = AllocateResponseProto.newBuilder();
+ }
+
+ public AllocateResponsePBImpl(AllocateResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public AllocateResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.amResponse != null) {
+ builder.setAMResponse(convertToProtoFormat(this.amResponse));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = AllocateResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public AMResponse getAMResponse() {
+ AllocateResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.amResponse != null) {
+ return this.amResponse;
+ }
+ if (!p.hasAMResponse()) {
+ return null;
+ }
+ this.amResponse = convertFromProtoFormat(p.getAMResponse());
+ return this.amResponse;
+ }
+
+ @Override
+ public void setAMResponse(AMResponse aMResponse) {
+ maybeInitBuilder();
+ if (aMResponse == null)
+ builder.clearAMResponse();
+ this.amResponse = aMResponse;
+ }
+
+ private AMResponsePBImpl convertFromProtoFormat(AMResponseProto p) {
+ return new AMResponsePBImpl(p);
+ }
+
+ private AMResponseProto convertToProtoFormat(AMResponse t) {
+ return ((AMResponsePBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java
new file mode 100644
index 0000000..1c9a6c0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java
@@ -0,0 +1,127 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProtoOrBuilder;
+
+
+
+public class FinishApplicationMasterRequestPBImpl extends ProtoBase<FinishApplicationMasterRequestProto> implements FinishApplicationMasterRequest {
+ FinishApplicationMasterRequestProto proto = FinishApplicationMasterRequestProto.getDefaultInstance();
+ FinishApplicationMasterRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationAttemptId appAttemptId = null;
+
+
+ public FinishApplicationMasterRequestPBImpl() {
+ builder = FinishApplicationMasterRequestProto.newBuilder();
+ }
+
+ public FinishApplicationMasterRequestPBImpl(FinishApplicationMasterRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public FinishApplicationMasterRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.appAttemptId != null) {
+ builder.setApplicationAttemptId(convertToProtoFormat(this.appAttemptId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = FinishApplicationMasterRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ApplicationAttemptId getApplicationAttemptId() {
+ FinishApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.appAttemptId != null) {
+ return this.appAttemptId;
+ }
+ if (!p.hasApplicationAttemptId()) {
+ return null;
+ }
+ this.appAttemptId = convertFromProtoFormat(p.getApplicationAttemptId());
+ return this.appAttemptId;
+ }
+
+ @Override
+ public void setAppAttemptId(ApplicationAttemptId applicationAttemptId) {
+ maybeInitBuilder();
+ if (applicationAttemptId == null)
+ builder.clearApplicationAttemptId();
+ this.appAttemptId = applicationAttemptId;
+ }
+
+ @Override
+ public String getDiagnostics() {
+ FinishApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getDiagnostics();
+ }
+
+ @Override
+ public void setDiagnostics(String diagnostics) {
+ maybeInitBuilder();
+ builder.setDiagnostics(diagnostics);
+ }
+
+ @Override
+ public String getTrackingUrl() {
+ FinishApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getTrackingUrl();
+ }
+
+ @Override
+ public void setTrackingUrl(String url) {
+ maybeInitBuilder();
+ builder.setTrackingUrl(url);
+ }
+
+ @Override
+ public String getFinalState() {
+ FinishApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getFinalState();
+ }
+
+ @Override
+ public void setFinalState(String state) {
+ maybeInitBuilder();
+ builder.setFinalState(state);
+ }
+
+ private ApplicationAttemptIdPBImpl convertFromProtoFormat(ApplicationAttemptIdProto p) {
+ return new ApplicationAttemptIdPBImpl(p);
+ }
+
+ private ApplicationAttemptIdProto convertToProtoFormat(ApplicationAttemptId t) {
+ return ((ApplicationAttemptIdPBImpl)t).getProto();
+ }
+
+
+
+}
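A short sketch of how an ApplicationMaster might fill this record at shutdown; the 'appAttemptId' variable is assumed to come from the AM's registration, and "SUCCEEDED" is only an illustrative final-state string, not a value mandated by this API:

    // Hypothetical usage sketch, not part of the patch.
    FinishApplicationMasterRequestPBImpl finish =
        new FinishApplicationMasterRequestPBImpl();
    finish.setAppAttemptId(appAttemptId);        // cached until getProto()
    finish.setDiagnostics("");                   // written straight to builder
    finish.setTrackingUrl("http://host:port/");  // placeholder URL
    finish.setFinalState("SUCCEEDED");
    FinishApplicationMasterRequestProto wire = finish.getProto();

Note that unlike the attempt-id setter, the String setters hand their argument straight to the protobuf builder, which rejects null; callers should pass an empty string rather than null for unset diagnostics or URLs.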
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java
new file mode 100644
index 0000000..1a5b7d4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java
@@ -0,0 +1,41 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterResponseProto;
+
+
+
+public class FinishApplicationMasterResponsePBImpl extends ProtoBase<FinishApplicationMasterResponseProto> implements FinishApplicationMasterResponse {
+ FinishApplicationMasterResponseProto proto = FinishApplicationMasterResponseProto.getDefaultInstance();
+ FinishApplicationMasterResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public FinishApplicationMasterResponsePBImpl() {
+ builder = FinishApplicationMasterResponseProto.newBuilder();
+ }
+
+ public FinishApplicationMasterResponsePBImpl(FinishApplicationMasterResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public FinishApplicationMasterResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = FinishApplicationMasterResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+
+
+
+}
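This response record (like FinishApplicationResponse, GetClusterMetricsRequest and GetNewApplicationIdRequest below) carries no fields at all; presumably it exists so the RPC signatures stay symmetric and fields can be added to the .proto later without breaking the Java interfaces.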
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationRequestPBImpl.java
new file mode 100644
index 0000000..749e099
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationRequestPBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationRequestProtoOrBuilder;
+
+
+
+public class FinishApplicationRequestPBImpl extends ProtoBase<FinishApplicationRequestProto> implements FinishApplicationRequest {
+ FinishApplicationRequestProto proto = FinishApplicationRequestProto.getDefaultInstance();
+ FinishApplicationRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationId applicationId = null;
+
+
+ public FinishApplicationRequestPBImpl() {
+ builder = FinishApplicationRequestProto.newBuilder();
+ }
+
+ public FinishApplicationRequestPBImpl(FinishApplicationRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public FinishApplicationRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.applicationId != null) {
+ builder.setApplicationId(convertToProtoFormat(this.applicationId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = FinishApplicationRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ApplicationId getApplicationId() {
+ FinishApplicationRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationId != null) {
+ return this.applicationId;
+ }
+ if (!p.hasApplicationId()) {
+ return null;
+ }
+ this.applicationId = convertFromProtoFormat(p.getApplicationId());
+ return this.applicationId;
+ }
+
+ @Override
+ public void setApplicationId(ApplicationId applicationId) {
+ maybeInitBuilder();
+ if (applicationId == null)
+ builder.clearApplicationId();
+ this.applicationId = applicationId;
+ }
+
+ private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationResponsePBImpl.java
new file mode 100644
index 0000000..ec76cc6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationResponsePBImpl.java
@@ -0,0 +1,41 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationResponseProto;
+
+
+
+public class FinishApplicationResponsePBImpl extends ProtoBase<FinishApplicationResponseProto> implements FinishApplicationResponse {
+ FinishApplicationResponseProto proto = FinishApplicationResponseProto.getDefaultInstance();
+ FinishApplicationResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public FinishApplicationResponsePBImpl() {
+ builder = FinishApplicationResponseProto.newBuilder();
+ }
+
+ public FinishApplicationResponsePBImpl(FinishApplicationResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public FinishApplicationResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = FinishApplicationResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllApplicationsRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllApplicationsRequestPBImpl.java
new file mode 100644
index 0000000..f8fbc2b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllApplicationsRequestPBImpl.java
@@ -0,0 +1,29 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto;
+
+public class GetAllApplicationsRequestPBImpl extends
+ ProtoBase<GetAllApplicationsRequestProto> implements GetAllApplicationsRequest {
+ GetAllApplicationsRequestProto proto = GetAllApplicationsRequestProto.getDefaultInstance();
+ GetAllApplicationsRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public GetAllApplicationsRequestPBImpl() {
+ builder = GetAllApplicationsRequestProto.newBuilder();
+ }
+
+ public GetAllApplicationsRequestPBImpl(GetAllApplicationsRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public GetAllApplicationsRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllApplicationsResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllApplicationsResponsePBImpl.java
new file mode 100644
index 0000000..2010076
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllApplicationsResponsePBImpl.java
@@ -0,0 +1,134 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsResponseProtoOrBuilder;
+
+public class GetAllApplicationsResponsePBImpl
+extends ProtoBase<GetAllApplicationsResponseProto> implements
+GetAllApplicationsResponse {
+
+ GetAllApplicationsResponseProto proto =
+ GetAllApplicationsResponseProto.getDefaultInstance();
+ GetAllApplicationsResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ List<ApplicationReport> applicationList;
+
+ public GetAllApplicationsResponsePBImpl() {
+ builder = GetAllApplicationsResponseProto.newBuilder();
+ }
+
+ public GetAllApplicationsResponsePBImpl(GetAllApplicationsResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public List<ApplicationReport> getApplicationList() {
+ initLocalApplicationsList();
+ return this.applicationList;
+ }
+
+ @Override
+ public void setApplicationList(List<ApplicationReport> applications) {
+ maybeInitBuilder();
+ if (applications == null)
+ builder.clearApplications();
+ this.applicationList = applications;
+ }
+
+ @Override
+ public GetAllApplicationsResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.applicationList != null) {
+ addLocalApplicationsToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetAllApplicationsResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ // Once this is called, applicationList will never be null - until getProto is called.
+ private void initLocalApplicationsList() {
+ if (this.applicationList != null) {
+ return;
+ }
+ GetAllApplicationsResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<ApplicationReportProto> list = p.getApplicationsList();
+ applicationList = new ArrayList<ApplicationReport>();
+
+ for (ApplicationReportProto a : list) {
+ applicationList.add(convertFromProtoFormat(a));
+ }
+ }
+
+ private void addLocalApplicationsToProto() {
+ maybeInitBuilder();
+ builder.clearApplications();
+ if (applicationList == null)
+ return;
+ Iterable<ApplicationReportProto> iterable = new Iterable<ApplicationReportProto>() {
+ @Override
+ public Iterator<ApplicationReportProto> iterator() {
+ return new Iterator<ApplicationReportProto>() {
+
+ Iterator<ApplicationReport> iter = applicationList.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public ApplicationReportProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllApplications(iterable);
+ }
+
+ private ApplicationReportPBImpl convertFromProtoFormat(ApplicationReportProto p) {
+ return new ApplicationReportPBImpl(p);
+ }
+
+ private ApplicationReportProto convertToProtoFormat(ApplicationReport t) {
+ return ((ApplicationReportPBImpl)t).getProto();
+ }
+
+}
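The anonymous Iterable in addLocalApplicationsToProto() is a deliberate alternative to materializing an intermediate List<ApplicationReportProto>: protobuf's addAll* methods only need something they can iterate once, so each report is converted exactly when the builder consumes it. A generic form of the same adapter, shown only to illustrate the idea (Converter is a hypothetical helper interface, not part of this patch):

    import java.util.Iterator;

    interface Converter<F, T> { T convert(F from); }

    static <F, T> Iterable<T> converting(final Iterable<F> from,
                                         final Converter<F, T> fn) {
      return new Iterable<T>() {
        public Iterator<T> iterator() {
          final Iterator<F> inner = from.iterator();
          return new Iterator<T>() {
            public boolean hasNext() { return inner.hasNext(); }
            public T next() { return fn.convert(inner.next()); }
            // Conversion is read-only; mutation is not supported.
            public void remove() { throw new UnsupportedOperationException(); }
          };
        }
      };
    }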
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java
new file mode 100644
index 0000000..24d257a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProtoOrBuilder;
+
+
+
+public class GetApplicationReportRequestPBImpl extends ProtoBase<GetApplicationReportRequestProto> implements GetApplicationReportRequest {
+ GetApplicationReportRequestProto proto = GetApplicationReportRequestProto.getDefaultInstance();
+ GetApplicationReportRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationId applicationId = null;
+
+
+ public GetApplicationReportRequestPBImpl() {
+ builder = GetApplicationReportRequestProto.newBuilder();
+ }
+
+ public GetApplicationReportRequestPBImpl(GetApplicationReportRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetApplicationReportRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (applicationId != null) {
+ builder.setApplicationId(convertToProtoFormat(this.applicationId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetApplicationReportRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ApplicationId getApplicationId() {
+ GetApplicationReportRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationId != null) {
+ return this.applicationId;
+ }
+ if (!p.hasApplicationId()) {
+ return null;
+ }
+ this.applicationId = convertFromProtoFormat(p.getApplicationId());
+ return this.applicationId;
+ }
+
+ @Override
+ public void setApplicationId(ApplicationId applicationId) {
+ maybeInitBuilder();
+ if (applicationId == null)
+ builder.clearApplicationId();
+ this.applicationId = applicationId;
+ }
+
+ private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java
new file mode 100644
index 0000000..bc47069
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProtoOrBuilder;
+
+
+
+public class GetApplicationReportResponsePBImpl extends ProtoBase<GetApplicationReportResponseProto> implements GetApplicationReportResponse {
+ GetApplicationReportResponseProto proto = GetApplicationReportResponseProto.getDefaultInstance();
+ GetApplicationReportResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationReport applicationReport = null;
+
+
+ public GetApplicationReportResponsePBImpl() {
+ builder = GetApplicationReportResponseProto.newBuilder();
+ }
+
+ public GetApplicationReportResponsePBImpl(GetApplicationReportResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetApplicationReportResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.applicationReport != null) {
+ builder.setApplicationReport(convertToProtoFormat(this.applicationReport));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetApplicationReportResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ApplicationReport getApplicationReport() {
+ GetApplicationReportResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationReport != null) {
+ return this.applicationReport;
+ }
+ if (!p.hasApplicationReport()) {
+ return null;
+ }
+ this.applicationReport = convertFromProtoFormat(p.getApplicationReport());
+ return this.applicationReport;
+ }
+
+ @Override
+ public void setApplicationReport(ApplicationReport applicationMaster) {
+ maybeInitBuilder();
+ if (applicationMaster == null)
+ builder.clearApplicationReport();
+ this.applicationReport = applicationMaster;
+ }
+
+ private ApplicationReportPBImpl convertFromProtoFormat(ApplicationReportProto p) {
+ return new ApplicationReportPBImpl(p);
+ }
+
+ private ApplicationReportProto convertToProtoFormat(ApplicationReport t) {
+ return ((ApplicationReportPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java
new file mode 100644
index 0000000..9439d3d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java
@@ -0,0 +1,41 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto;
+
+
+
+public class GetClusterMetricsRequestPBImpl extends ProtoBase<GetClusterMetricsRequestProto> implements GetClusterMetricsRequest {
+ GetClusterMetricsRequestProto proto = GetClusterMetricsRequestProto.getDefaultInstance();
+ GetClusterMetricsRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public GetClusterMetricsRequestPBImpl() {
+ builder = GetClusterMetricsRequestProto.newBuilder();
+ }
+
+ public GetClusterMetricsRequestPBImpl(GetClusterMetricsRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetClusterMetricsRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetClusterMetricsRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java
new file mode 100644
index 0000000..85d4f55
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
+import org.apache.hadoop.yarn.api.records.impl.pb.YarnClusterMetricsPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProtoOrBuilder;
+
+
+
+public class GetClusterMetricsResponsePBImpl extends ProtoBase<GetClusterMetricsResponseProto> implements GetClusterMetricsResponse {
+ GetClusterMetricsResponseProto proto = GetClusterMetricsResponseProto.getDefaultInstance();
+ GetClusterMetricsResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private YarnClusterMetrics yarnClusterMetrics = null;
+
+
+ public GetClusterMetricsResponsePBImpl() {
+ builder = GetClusterMetricsResponseProto.newBuilder();
+ }
+
+ public GetClusterMetricsResponsePBImpl(GetClusterMetricsResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetClusterMetricsResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.yarnClusterMetrics != null) {
+ builder.setClusterMetrics(convertToProtoFormat(this.yarnClusterMetrics));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetClusterMetricsResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public YarnClusterMetrics getClusterMetrics() {
+ GetClusterMetricsResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.yarnClusterMetrics != null) {
+ return this.yarnClusterMetrics;
+ }
+ if (!p.hasClusterMetrics()) {
+ return null;
+ }
+ this.yarnClusterMetrics = convertFromProtoFormat(p.getClusterMetrics());
+ return this.yarnClusterMetrics;
+ }
+
+ @Override
+ public void setClusterMetrics(YarnClusterMetrics clusterMetrics) {
+ maybeInitBuilder();
+ if (clusterMetrics == null)
+ builder.clearClusterMetrics();
+ this.yarnClusterMetrics = clusterMetrics;
+ }
+
+ private YarnClusterMetricsPBImpl convertFromProtoFormat(YarnClusterMetricsProto p) {
+ return new YarnClusterMetricsPBImpl(p);
+ }
+
+ private YarnClusterMetricsProto convertToProtoFormat(YarnClusterMetrics t) {
+ return ((YarnClusterMetricsPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java
new file mode 100644
index 0000000..93c1925
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java
@@ -0,0 +1,30 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto;
+
+public class GetClusterNodesRequestPBImpl extends
+ ProtoBase<GetClusterNodesRequestProto> implements GetClusterNodesRequest {
+
+ GetClusterNodesRequestProto proto = GetClusterNodesRequestProto.getDefaultInstance();
+ GetClusterNodesRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public GetClusterNodesRequestPBImpl() {
+ builder = GetClusterNodesRequestProto.newBuilder();
+ }
+
+ public GetClusterNodesRequestPBImpl(GetClusterNodesRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public GetClusterNodesRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java
new file mode 100644
index 0000000..9492091
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java
@@ -0,0 +1,133 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProtoOrBuilder;
+
+public class GetClusterNodesResponsePBImpl extends
+ ProtoBase<GetClusterNodesResponseProto> implements GetClusterNodesResponse {
+
+ GetClusterNodesResponseProto proto =
+ GetClusterNodesResponseProto.getDefaultInstance();
+ GetClusterNodesResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ List<NodeReport> nodeManagerInfoList;
+
+ public GetClusterNodesResponsePBImpl() {
+ builder = GetClusterNodesResponseProto.newBuilder();
+ }
+
+ public GetClusterNodesResponsePBImpl(GetClusterNodesResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public List<NodeReport> getNodeReports() {
+ initLocalNodeManagerInfosList();
+ return this.nodeManagerInfoList;
+ }
+
+ @Override
+ public void setNodeReports(List<NodeReport> nodeManagers) {
+ maybeInitBuilder();
+ if (nodeManagers == null)
+ builder.clearNodeReports();
+ this.nodeManagerInfoList = nodeManagers;
+ }
+
+ @Override
+ public GetClusterNodesResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.nodeManagerInfoList != null) {
+ addLocalNodeManagerInfosToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetClusterNodesResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ // Once this is called, nodeManagerInfoList will never be null - until getProto is called.
+ private void initLocalNodeManagerInfosList() {
+ if (this.nodeManagerInfoList != null) {
+ return;
+ }
+ GetClusterNodesResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<NodeReportProto> list = p.getNodeReportsList();
+ nodeManagerInfoList = new ArrayList<NodeReport>();
+
+ for (NodeReportProto a : list) {
+ nodeManagerInfoList.add(convertFromProtoFormat(a));
+ }
+ }
+
+ private void addLocalNodeManagerInfosToProto() {
+ maybeInitBuilder();
+ builder.clearNodeReports();
+ if (nodeManagerInfoList == null)
+ return;
+ Iterable<NodeReportProto> iterable = new Iterable<NodeReportProto>() {
+ @Override
+ public Iterator<NodeReportProto> iterator() {
+ return new Iterator<NodeReportProto>() {
+
+ Iterator<NodeReport> iter = nodeManagerInfoList.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public NodeReportProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllNodeReports(iterable);
+ }
+
+ private NodeReportPBImpl convertFromProtoFormat(NodeReportProto p) {
+ return new NodeReportPBImpl(p);
+ }
+
+ private NodeReportProto convertToProtoFormat(NodeReport t) {
+ return ((NodeReportPBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusRequestPBImpl.java
new file mode 100644
index 0000000..229489c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusRequestPBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusRequestProtoOrBuilder;
+
+
+
+public class GetContainerStatusRequestPBImpl extends ProtoBase<GetContainerStatusRequestProto> implements GetContainerStatusRequest {
+ GetContainerStatusRequestProto proto = GetContainerStatusRequestProto.getDefaultInstance();
+ GetContainerStatusRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ContainerId containerId = null;
+
+
+ public GetContainerStatusRequestPBImpl() {
+ builder = GetContainerStatusRequestProto.newBuilder();
+ }
+
+ public GetContainerStatusRequestPBImpl(GetContainerStatusRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetContainerStatusRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.containerId != null) {
+ builder.setContainerId(convertToProtoFormat(this.containerId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetContainerStatusRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ContainerId getContainerId() {
+ GetContainerStatusRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerId != null) {
+ return this.containerId;
+ }
+ if (!p.hasContainerId()) {
+ return null;
+ }
+ this.containerId = convertFromProtoFormat(p.getContainerId());
+ return this.containerId;
+ }
+
+ @Override
+ public void setContainerId(ContainerId containerId) {
+ maybeInitBuilder();
+ if (containerId == null)
+ builder.clearContainerId();
+ this.containerId = containerId;
+ }
+
+ private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+ return new ContainerIdPBImpl(p);
+ }
+
+ private ContainerIdProto convertToProtoFormat(ContainerId t) {
+ return ((ContainerIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusResponsePBImpl.java
new file mode 100644
index 0000000..83e5f4c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusResponsePBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusResponseProtoOrBuilder;
+
+
+
+public class GetContainerStatusResponsePBImpl extends ProtoBase<GetContainerStatusResponseProto> implements GetContainerStatusResponse {
+ GetContainerStatusResponseProto proto = GetContainerStatusResponseProto.getDefaultInstance();
+ GetContainerStatusResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ContainerStatus containerStatus = null;
+
+
+ public GetContainerStatusResponsePBImpl() {
+ builder = GetContainerStatusResponseProto.newBuilder();
+ }
+
+ public GetContainerStatusResponsePBImpl(GetContainerStatusResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetContainerStatusResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.containerStatus != null) {
+ builder.setStatus(convertToProtoFormat(this.containerStatus));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetContainerStatusResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ContainerStatus getStatus() {
+ GetContainerStatusResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerStatus != null) {
+ return this.containerStatus;
+ }
+ if (!p.hasStatus()) {
+ return null;
+ }
+ this.containerStatus = convertFromProtoFormat(p.getStatus());
+ return this.containerStatus;
+ }
+
+ @Override
+ public void setStatus(ContainerStatus status) {
+ maybeInitBuilder();
+ if (status == null)
+ builder.clearStatus();
+ this.containerStatus = status;
+ }
+
+ private ContainerStatusPBImpl convertFromProtoFormat(ContainerStatusProto p) {
+ return new ContainerStatusPBImpl(p);
+ }
+
+ private ContainerStatusProto convertToProtoFormat(ContainerStatus t) {
+ return ((ContainerStatusPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdRequestPBImpl.java
new file mode 100644
index 0000000..d3ad8e7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdRequestPBImpl.java
@@ -0,0 +1,41 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdRequestProto;
+
+
+
+public class GetNewApplicationIdRequestPBImpl extends ProtoBase<GetNewApplicationIdRequestProto> implements GetNewApplicationIdRequest {
+ GetNewApplicationIdRequestProto proto = GetNewApplicationIdRequestProto.getDefaultInstance();
+ GetNewApplicationIdRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public GetNewApplicationIdRequestPBImpl() {
+ builder = GetNewApplicationIdRequestProto.newBuilder();
+ }
+
+ public GetNewApplicationIdRequestPBImpl(GetNewApplicationIdRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetNewApplicationIdRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetNewApplicationIdRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdResponsePBImpl.java
new file mode 100644
index 0000000..191eb10
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdResponsePBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdResponseProtoOrBuilder;
+
+
+
+public class GetNewApplicationIdResponsePBImpl extends ProtoBase<GetNewApplicationIdResponseProto> implements GetNewApplicationIdResponse {
+ GetNewApplicationIdResponseProto proto = GetNewApplicationIdResponseProto.getDefaultInstance();
+ GetNewApplicationIdResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationId applicationId = null;
+
+
+ public GetNewApplicationIdResponsePBImpl() {
+ builder = GetNewApplicationIdResponseProto.newBuilder();
+ }
+
+ public GetNewApplicationIdResponsePBImpl(GetNewApplicationIdResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetNewApplicationIdResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (applicationId != null) {
+ builder.setApplicationId(convertToProtoFormat(this.applicationId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetNewApplicationIdResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ApplicationId getApplicationId() {
+ GetNewApplicationIdResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationId != null) {
+ return this.applicationId;
+ }
+ if (!p.hasApplicationId()) {
+ return null;
+ }
+ this.applicationId = convertFromProtoFormat(p.getApplicationId());
+ return this.applicationId;
+ }
+
+ @Override
+ public void setApplicationId(ApplicationId applicationId) {
+ maybeInitBuilder();
+ if (applicationId == null)
+ builder.clearApplicationId();
+ this.applicationId = applicationId;
+ }
+
+ private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java
new file mode 100644
index 0000000..9a02683
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProtoOrBuilder;
+
+public class GetQueueInfoRequestPBImpl extends
+ ProtoBase<GetQueueInfoRequestProto> implements GetQueueInfoRequest {
+
+ GetQueueInfoRequestProto proto =
+ GetQueueInfoRequestProto.getDefaultInstance();
+ GetQueueInfoRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public GetQueueInfoRequestPBImpl() {
+ builder = GetQueueInfoRequestProto.newBuilder();
+ }
+
+ public GetQueueInfoRequestPBImpl(GetQueueInfoRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public boolean getIncludeApplications() {
+ GetQueueInfoRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasIncludeApplications()) ? p.getIncludeApplications() : false;
+ }
+
+ @Override
+ public boolean getIncludeChildQueues() {
+ GetQueueInfoRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasIncludeChildQueues()) ? p.getIncludeChildQueues() : false;
+ }
+
+ @Override
+ public String getQueueName() {
+ GetQueueInfoRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasQueueName()) ? p.getQueueName() : null;
+ }
+
+ @Override
+ public boolean getRecursive() {
+ GetQueueInfoRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasRecursive()) ? p.getRecursive() : false;
+ }
+
+ @Override
+ public void setIncludeApplications(boolean includeApplications) {
+ maybeInitBuilder();
+ builder.setIncludeApplications(includeApplications);
+ }
+
+ @Override
+ public void setIncludeChildQueues(boolean includeChildQueues) {
+ maybeInitBuilder();
+ builder.setIncludeChildQueues(includeChildQueues);
+ }
+
+ @Override
+ public void setQueueName(String queueName) {
+ maybeInitBuilder();
+ if (queueName == null) {
+ builder.clearQueueName();
+ return;
+ }
+ builder.setQueueName(queueName);
+ }
+
+ @Override
+ public void setRecursive(boolean recursive) {
+ maybeInitBuilder();
+ builder.setRecursive(recursive);
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetQueueInfoRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public GetQueueInfoRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+}
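Unlike the list-backed records above, this request keeps no local cache at all: every setter writes straight through to the builder, and each getter falls back to a protobuf default (false, or null for the queue name) when the field was never set. An illustrative construction; "default" is just an example queue name:

    // Hypothetical usage sketch, not part of the patch.
    GetQueueInfoRequestPBImpl req = new GetQueueInfoRequestPBImpl();
    req.setQueueName("default");
    req.setIncludeApplications(true);
    req.setIncludeChildQueues(true);
    req.setRecursive(false);           // explicit, though false is the default
    GetQueueInfoRequestProto wire = req.getProto();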
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java
new file mode 100644
index 0000000..fb43ce4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java
@@ -0,0 +1,90 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProtoOrBuilder;
+
+public class GetQueueInfoResponsePBImpl extends ProtoBase<GetQueueInfoResponseProto>
+implements GetQueueInfoResponse {
+
+ QueueInfo queueInfo;
+
+ GetQueueInfoResponseProto proto =
+ GetQueueInfoResponseProto.getDefaultInstance();
+ GetQueueInfoResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public GetQueueInfoResponsePBImpl() {
+ builder = GetQueueInfoResponseProto.newBuilder();
+ }
+
+ public GetQueueInfoResponsePBImpl(GetQueueInfoResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public GetQueueInfoResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ @Override
+ public QueueInfo getQueueInfo() {
+ if (this.queueInfo != null) {
+ return this.queueInfo;
+ }
+
+ GetQueueInfoResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasQueueInfo()) {
+ return null;
+ }
+ this.queueInfo = convertFromProtoFormat(p.getQueueInfo());
+ return this.queueInfo;
+ }
+
+ @Override
+ public void setQueueInfo(QueueInfo queueInfo) {
+ maybeInitBuilder();
+ if (queueInfo == null) {
+ builder.clearQueueInfo();
+ }
+ this.queueInfo = queueInfo;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.queueInfo != null) {
+ builder.setQueueInfo(convertToProtoFormat(this.queueInfo));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetQueueInfoResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ private QueueInfo convertFromProtoFormat(QueueInfoProto queueInfo) {
+ return new QueueInfoPBImpl(queueInfo);
+ }
+
+ private QueueInfoProto convertToProtoFormat(QueueInfo queueInfo) {
+ return ((QueueInfoPBImpl)queueInfo).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java
new file mode 100644
index 0000000..147db15
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java
@@ -0,0 +1,32 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
+
+public class GetQueueUserAclsInfoRequestPBImpl extends
+ ProtoBase<GetQueueUserAclsInfoRequestProto> implements
+ GetQueueUserAclsInfoRequest {
+
+ GetQueueUserAclsInfoRequestProto proto =
+ GetQueueUserAclsInfoRequestProto.getDefaultInstance();
+ GetQueueUserAclsInfoRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public GetQueueUserAclsInfoRequestPBImpl() {
+ builder = GetQueueUserAclsInfoRequestProto.newBuilder();
+ }
+
+ public GetQueueUserAclsInfoRequestPBImpl(GetQueueUserAclsInfoRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public GetQueueUserAclsInfoRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java
new file mode 100644
index 0000000..be135e0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java
@@ -0,0 +1,135 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.QueueUserACLInfoPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProtoOrBuilder;
+
+public class GetQueueUserAclsInfoResponsePBImpl extends
+ProtoBase<GetQueueUserAclsInfoResponseProto>
+implements GetQueueUserAclsInfoResponse {
+
+ List<QueueUserACLInfo> queueUserAclsInfoList;
+
+ GetQueueUserAclsInfoResponseProto proto =
+ GetQueueUserAclsInfoResponseProto.getDefaultInstance();
+ GetQueueUserAclsInfoResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public GetQueueUserAclsInfoResponsePBImpl() {
+ builder = GetQueueUserAclsInfoResponseProto.newBuilder();
+ }
+
+ public GetQueueUserAclsInfoResponsePBImpl(
+ GetQueueUserAclsInfoResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public List<QueueUserACLInfo> getUserAclsInfoList() {
+ initLocalQueueUserAclsList();
+ return queueUserAclsInfoList;
+ }
+
+ @Override
+ public void setUserAclsInfoList(List<QueueUserACLInfo> queueUserAclsList) {
+ maybeInitBuilder();
+ if (queueUserAclsList == null) {
+ builder.clearQueueUserAcls();
+ }
+ this.queueUserAclsInfoList = queueUserAclsList;
+ }
+
+ @Override
+ public GetQueueUserAclsInfoResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.queueUserAclsInfoList != null) {
+ addLocalQueueUserACLInfosToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetQueueUserAclsInfoResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ // Once this is called, queueUserAclsInfoList will never be null - until getProto() is called.
+ private void initLocalQueueUserAclsList() {
+ if (this.queueUserAclsInfoList != null) {
+ return;
+ }
+ GetQueueUserAclsInfoResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<QueueUserACLInfoProto> list = p.getQueueUserAclsList();
+ queueUserAclsInfoList = new ArrayList<QueueUserACLInfo>();
+
+ for (QueueUserACLInfoProto a : list) {
+ queueUserAclsInfoList.add(convertFromProtoFormat(a));
+ }
+ }
+
+ private void addLocalQueueUserACLInfosToProto() {
+ maybeInitBuilder();
+ builder.clearQueueUserAcls();
+ if (queueUserAclsInfoList == null)
+ return;
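+ // Lazily convert each record to its proto form as the builder iterates,
+ // avoiding an intermediate converted list.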
+ Iterable<QueueUserACLInfoProto> iterable = new Iterable<QueueUserACLInfoProto>() {
+ @Override
+ public Iterator<QueueUserACLInfoProto> iterator() {
+ return new Iterator<QueueUserACLInfoProto>() {
+
+ Iterator<QueueUserACLInfo> iter = queueUserAclsInfoList.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public QueueUserACLInfoProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllQueueUserAcls(iterable);
+ }
+
+ private QueueUserACLInfoPBImpl convertFromProtoFormat(QueueUserACLInfoProto p) {
+ return new QueueUserACLInfoPBImpl(p);
+ }
+
+ private QueueUserACLInfoProto convertToProtoFormat(QueueUserACLInfo t) {
+ return ((QueueUserACLInfoPBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
new file mode 100644
index 0000000..ccb8271
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
@@ -0,0 +1,127 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProtoOrBuilder;
+
+
+
+public class RegisterApplicationMasterRequestPBImpl extends ProtoBase<RegisterApplicationMasterRequestProto> implements RegisterApplicationMasterRequest {
+ RegisterApplicationMasterRequestProto proto = RegisterApplicationMasterRequestProto.getDefaultInstance();
+ RegisterApplicationMasterRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationAttemptId applicationAttemptId = null;
+
+
+ public RegisterApplicationMasterRequestPBImpl() {
+ builder = RegisterApplicationMasterRequestProto.newBuilder();
+ }
+
+ public RegisterApplicationMasterRequestPBImpl(RegisterApplicationMasterRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RegisterApplicationMasterRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
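+ // Only push the cached attempt id into the builder when it differs from
+ // the builder's current value, avoiding a redundant conversion.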
+ private void mergeLocalToBuilder() {
+ if (this.applicationAttemptId != null && !((ApplicationAttemptIdPBImpl)this.applicationAttemptId).getProto().equals(builder.getApplicationAttemptId())) {
+ builder.setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = RegisterApplicationMasterRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ApplicationAttemptId getApplicationAttemptId() {
+ RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationAttemptId != null) {
+ return this.applicationAttemptId;
+ }
+ if (!p.hasApplicationAttemptId()) {
+ return null;
+ }
+ this.applicationAttemptId = convertFromProtoFormat(p.getApplicationAttemptId());
+ return this.applicationAttemptId;
+ }
+
+ @Override
+ public void setApplicationAttemptId(ApplicationAttemptId applicationMaster) {
+ maybeInitBuilder();
+ if (applicationMaster == null)
+ builder.clearApplicationAttemptId();
+ this.applicationAttemptId = applicationMaster;
+ }
+
+ @Override
+ public String getHost() {
+ RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getHost();
+ }
+
+ @Override
+ public void setHost(String host) {
+ maybeInitBuilder();
+ builder.setHost(host);
+ }
+
+ @Override
+ public int getRpcPort() {
+ RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getRpcPort();
+ }
+
+ @Override
+ public void setRpcPort(int port) {
+ maybeInitBuilder();
+ builder.setRpcPort(port);
+ }
+
+ @Override
+ public String getTrackingUrl() {
+ RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getTrackingUrl();
+ }
+
+ @Override
+ public void setTrackingUrl(String url) {
+ maybeInitBuilder();
+ builder.setTrackingUrl(url);
+ }
+
+ private ApplicationAttemptIdPBImpl convertFromProtoFormat(ApplicationAttemptIdProto p) {
+ return new ApplicationAttemptIdPBImpl(p);
+ }
+
+ private ApplicationAttemptIdProto convertToProtoFormat(ApplicationAttemptId t) {
+ return ((ApplicationAttemptIdPBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
new file mode 100644
index 0000000..3c904cb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
@@ -0,0 +1,126 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProtoOrBuilder;
+
+
+
+public class RegisterApplicationMasterResponsePBImpl
+ extends ProtoBase<RegisterApplicationMasterResponseProto>
+ implements RegisterApplicationMasterResponse {
+ RegisterApplicationMasterResponseProto proto =
+ RegisterApplicationMasterResponseProto.getDefaultInstance();
+ RegisterApplicationMasterResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
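+ // Locally cached records; mergeLocalToBuilder() flushes them into the
+ // builder before getProto() serializes it.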
+ private Resource minimumResourceCapability;
+ private Resource maximumResourceCapability;
+
+ public RegisterApplicationMasterResponsePBImpl() {
+ builder = RegisterApplicationMasterResponseProto.newBuilder();
+ }
+
+ public RegisterApplicationMasterResponsePBImpl(RegisterApplicationMasterResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RegisterApplicationMasterResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.minimumResourceCapability != null) {
+ builder.setMinimumCapability(
+ convertToProtoFormat(this.minimumResourceCapability));
+ }
+ if (this.maximumResourceCapability != null) {
+ builder.setMaximumCapability(
+ convertToProtoFormat(this.maximumResourceCapability));
+ }
+ }
+
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = RegisterApplicationMasterResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public Resource getMaximumResourceCapability() {
+ if (this.maximumResourceCapability != null) {
+ return this.maximumResourceCapability;
+ }
+
+ RegisterApplicationMasterResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasMaximumCapability()) {
+ return null;
+ }
+
+ this.maximumResourceCapability = convertFromProtoFormat(p.getMaximumCapability());
+ return this.maximumResourceCapability;
+ }
+
+ @Override
+ public Resource getMinimumResourceCapability() {
+ if (this.minimumResourceCapability != null) {
+ return this.minimumResourceCapability;
+ }
+
+ RegisterApplicationMasterResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasMinimumCapability()) {
+ return null;
+ }
+
+ this.minimumResourceCapability = convertFromProtoFormat(p.getMinimumCapability());
+ return this.minimumResourceCapability;
+ }
+
+ @Override
+ public void setMaximumResourceCapability(Resource capability) {
+ maybeInitBuilder();
+ if (capability == null) {
+ builder.clearMaximumCapability();
+ }
+ this.maximumResourceCapability = capability;
+ }
+
+ @Override
+ public void setMinimumResourceCapability(Resource capability) {
+ maybeInitBuilder();
+ if (capability == null) {
+ builder.clearMinimumCapability();
+ }
+ this.minimumResourceCapability = capability;
+ }
+
+ private Resource convertFromProtoFormat(ResourceProto resource) {
+ return new ResourcePBImpl(resource);
+ }
+
+ private ResourceProto convertToProtoFormat(Resource resource) {
+ return ((ResourcePBImpl)resource).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java
new file mode 100644
index 0000000..bca62f4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProtoOrBuilder;
+
+
+
+public class StartContainerRequestPBImpl extends ProtoBase<StartContainerRequestProto> implements StartContainerRequest {
+ StartContainerRequestProto proto = StartContainerRequestProto.getDefaultInstance();
+ StartContainerRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ContainerLaunchContext containerLaunchContext = null;
+
+
+ public StartContainerRequestPBImpl() {
+ builder = StartContainerRequestProto.newBuilder();
+ }
+
+ public StartContainerRequestPBImpl(StartContainerRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public StartContainerRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.containerLaunchContext != null) {
+ builder.setContainerLaunchContext(convertToProtoFormat(this.containerLaunchContext));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = StartContainerRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
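+ // Decodes the proto form on first access and caches the result, so
+ // repeated gets return the same record instance.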
+ @Override
+ public ContainerLaunchContext getContainerLaunchContext() {
+ StartContainerRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerLaunchContext != null) {
+ return this.containerLaunchContext;
+ }
+ if (!p.hasContainerLaunchContext()) {
+ return null;
+ }
+ this.containerLaunchContext = convertFromProtoFormat(p.getContainerLaunchContext());
+ return this.containerLaunchContext;
+ }
+
+ @Override
+ public void setContainerLaunchContext(ContainerLaunchContext containerLaunchContext) {
+ maybeInitBuilder();
+ if (containerLaunchContext == null)
+ builder.clearContainerLaunchContext();
+ this.containerLaunchContext = containerLaunchContext;
+ }
+
+ private ContainerLaunchContextPBImpl convertFromProtoFormat(ContainerLaunchContextProto p) {
+ return new ContainerLaunchContextPBImpl(p);
+ }
+
+ private ContainerLaunchContextProto convertToProtoFormat(ContainerLaunchContext t) {
+ return ((ContainerLaunchContextPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java
new file mode 100644
index 0000000..0b1cdf6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java
@@ -0,0 +1,37 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerResponseProto;
+
+
+
+public class StartContainerResponsePBImpl extends ProtoBase<StartContainerResponseProto> implements StartContainerResponse {
+ StartContainerResponseProto proto = StartContainerResponseProto.getDefaultInstance();
+ StartContainerResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public StartContainerResponsePBImpl() {
+ builder = StartContainerResponseProto.newBuilder();
+ }
+
+ public StartContainerResponsePBImpl(StartContainerResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public StartContainerResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = StartContainerResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainerRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainerRequestPBImpl.java
new file mode 100644
index 0000000..1aa59cf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainerRequestPBImpl.java
@@ -0,0 +1,109 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerRequestProtoOrBuilder;
+
+
+
+public class StopContainerRequestPBImpl extends ProtoBase<StopContainerRequestProto> implements StopContainerRequest {
+ StopContainerRequestProto proto = StopContainerRequestProto.getDefaultInstance();
+ StopContainerRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ContainerId containerId = null;
+
+
+ public StopContainerRequestPBImpl() {
+ builder = StopContainerRequestProto.newBuilder();
+ }
+
+ public StopContainerRequestPBImpl(StopContainerRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public StopContainerRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.containerId != null) {
+ builder.setContainerId(convertToProtoFormat(this.containerId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = StopContainerRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ContainerId getContainerId() {
+ StopContainerRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerId != null) {
+ return this.containerId;
+ }
+ if (!p.hasContainerId()) {
+ return null;
+ }
+ this.containerId = convertFromProtoFormat(p.getContainerId());
+ return this.containerId;
+ }
+
+ @Override
+ public void setContainerId(ContainerId containerId) {
+ maybeInitBuilder();
+ if (containerId == null)
+ builder.clearContainerId();
+ this.containerId = containerId;
+ }
+
+ private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+ return new ContainerIdPBImpl(p);
+ }
+
+ private ContainerIdProto convertToProtoFormat(ContainerId t) {
+ return ((ContainerIdPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainerResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainerResponsePBImpl.java
new file mode 100644
index 0000000..d9c0619
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainerResponsePBImpl.java
@@ -0,0 +1,55 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerResponseProto;
+
+
+
+public class StopContainerResponsePBImpl extends ProtoBase<StopContainerResponseProto> implements StopContainerResponse {
+ StopContainerResponseProto proto = StopContainerResponseProto.getDefaultInstance();
+ StopContainerResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public StopContainerResponsePBImpl() {
+ builder = StopContainerResponseProto.newBuilder();
+ }
+
+ public StopContainerResponsePBImpl(StopContainerResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public StopContainerResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = StopContainerResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java
new file mode 100644
index 0000000..66eb1f7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProtoOrBuilder;
+
+
+
+public class SubmitApplicationRequestPBImpl extends ProtoBase<SubmitApplicationRequestProto> implements SubmitApplicationRequest {
+ SubmitApplicationRequestProto proto = SubmitApplicationRequestProto.getDefaultInstance();
+ SubmitApplicationRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationSubmissionContext applicationSubmissionContext = null;
+
+
+ public SubmitApplicationRequestPBImpl() {
+ builder = SubmitApplicationRequestProto.newBuilder();
+ }
+
+ public SubmitApplicationRequestPBImpl(SubmitApplicationRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public SubmitApplicationRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.applicationSubmissionContext != null) {
+ builder.setApplicationSubmissionContext(convertToProtoFormat(this.applicationSubmissionContext));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = SubmitApplicationRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ApplicationSubmissionContext getApplicationSubmissionContext() {
+ SubmitApplicationRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationSubmissionContext != null) {
+ return this.applicationSubmissionContext;
+ }
+ if (!p.hasApplicationSubmissionContext()) {
+ return null;
+ }
+ this.applicationSubmissionContext = convertFromProtoFormat(p.getApplicationSubmissionContext());
+ return this.applicationSubmissionContext;
+ }
+
+ @Override
+ public void setApplicationSubmissionContext(ApplicationSubmissionContext applicationSubmissionContext) {
+ maybeInitBuilder();
+ if (applicationSubmissionContext == null)
+ builder.clearApplicationSubmissionContext();
+ this.applicationSubmissionContext = applicationSubmissionContext;
+ }
+
+ private ApplicationSubmissionContextPBImpl convertFromProtoFormat(ApplicationSubmissionContextProto p) {
+ return new ApplicationSubmissionContextPBImpl(p);
+ }
+
+ private ApplicationSubmissionContextProto convertToProtoFormat(ApplicationSubmissionContext t) {
+ return ((ApplicationSubmissionContextPBImpl)t).getProto();
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java
new file mode 100644
index 0000000..e9883cf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java
@@ -0,0 +1,37 @@
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto;
+
+
+
+public class SubmitApplicationResponsePBImpl extends ProtoBase<SubmitApplicationResponseProto> implements SubmitApplicationResponse {
+ SubmitApplicationResponseProto proto = SubmitApplicationResponseProto.getDefaultInstance();
+ SubmitApplicationResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public SubmitApplicationResponsePBImpl() {
+ builder = SubmitApplicationResponseProto.newBuilder();
+ }
+
+ public SubmitApplicationResponsePBImpl(SubmitApplicationResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public SubmitApplicationResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = SubmitApplicationResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java
new file mode 100644
index 0000000..a14b641
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java
@@ -0,0 +1,55 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+import java.util.List;
+
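+/**
+ * The response sent by the scheduler to an ApplicationMaster heartbeat:
+ * newly allocated and recently finished containers, the currently
+ * available resource headroom, and a reboot flag used to ask the AM to resync.
+ */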
+public interface AMResponse {
+ public boolean getReboot();
+ public int getResponseId();
+
+ public List<Container> getNewContainerList();
+ public Container getNewContainer(int index);
+ public int getNewContainerCount();
+
+ public void setReboot(boolean reboot);
+ public void setResponseId(int responseId);
+
+ public void addAllNewContainers(List<Container> containers);
+ public void addNewContainer(Container container);
+ public void removeNewContainer(int index);
+ public void clearNewContainers();
+
+ public void setAvailableResources(Resource limit);
+ public Resource getAvailableResources();
+
+ public List<Container> getFinishedContainerList();
+ public Container getFinishedContainer(int index);
+ public int getFinishedContainerCount();
+
+ public void addAllFinishedContainers(List<Container> containers);
+ public void addFinishedContainer(Container container);
+ public void removeFinishedContainer(int index);
+ public void clearFinishedContainers();
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java
new file mode 100644
index 0000000..cbe30cc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java
@@ -0,0 +1,10 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface ApplicationAttemptId extends Comparable<ApplicationAttemptId> {
+ public abstract ApplicationId getApplicationId();
+ public abstract int getAttemptId();
+
+ public abstract void setApplicationId(ApplicationId appID);
+ public abstract void setAttemptId(int attemptId);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java
new file mode 100644
index 0000000..37d3736
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java
@@ -0,0 +1,12 @@
+package org.apache.hadoop.yarn.api.records;
+
+
+
+public interface ApplicationId extends Comparable<ApplicationId> {
+ public abstract int getId();
+ public abstract long getClusterTimestamp();
+
+ public abstract void setId(int id);
+ public abstract void setClusterTimestamp(long clusterTimestamp);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java
new file mode 100644
index 0000000..725820c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+//TODO: Split into separate objects for register, deregister and in-RM use.
+public interface ApplicationMaster {
+ ApplicationId getApplicationId();
+ String getHost();
+ int getRpcPort();
+ String getTrackingUrl();
+ ApplicationStatus getStatus();
+ ApplicationState getState();
+ String getClientToken();
+ int getAMFailCount();
+ int getContainerCount();
+ String getDiagnostics();
+ void setApplicationId(ApplicationId appId);
+ void setHost(String host);
+ void setRpcPort(int rpcPort);
+ void setTrackingUrl(String url);
+ void setStatus(ApplicationStatus status);
+ void setState(ApplicationState state);
+ void setClientToken(String clientToken);
+ void setAMFailCount(int amFailCount);
+ void setContainerCount(int containerCount);
+ void setDiagnostics(String diagnostics);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
new file mode 100644
index 0000000..ee5c303
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -0,0 +1,52 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+public interface ApplicationReport {
+
+ ApplicationId getApplicationId();
+ void setApplicationId(ApplicationId applicationId);
+
+ String getUser();
+ void setUser(String user);
+
+ String getQueue();
+ void setQueue(String queue);
+
+ String getName();
+ void setName(String name);
+
+ String getHost();
+ void setHost(String host);
+
+ int getRpcPort();
+ void setRpcPort(int rpcPort);
+
+ String getClientToken();
+ void setClientToken(String clientToken);
+
+ ApplicationState getState();
+ void setState(ApplicationState state);
+
+ String getDiagnostics();
+ void setDiagnostics(String diagnostics);
+
+ String getTrackingUrl();
+ void setTrackingUrl(String url);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationState.java
new file mode 100644
index 0000000..6e57749
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationState.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+public enum ApplicationState {
+ NEW, SUBMITTED, RUNNING, RESTARTING, SUCCEEDED, FAILED, KILLED
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationStatus.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationStatus.java
new file mode 100644
index 0000000..6748c53
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationStatus.java
@@ -0,0 +1,29 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+public interface ApplicationStatus {
+ ApplicationAttemptId getApplicationAttemptId();
+ int getResponseId();
+ float getProgress();
+
+ void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId);
+ void setResponseId(int id);
+ void setProgress(float progress);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
new file mode 100644
index 0000000..7253fc3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
@@ -0,0 +1,75 @@
+package org.apache.hadoop.yarn.api.records;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+
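+/**
+ * Captures everything needed to submit and launch an application: its id,
+ * name, master capability, resources, environment, commands, queue, priority and user.
+ */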
+public interface ApplicationSubmissionContext {
+ public abstract ApplicationId getApplicationId();
+ public abstract String getApplicationName();
+ public abstract Resource getMasterCapability();
+
+ public abstract Map<String, URL> getAllResources();
+ public abstract URL getResource(String key);
+
+ public abstract Map<String, LocalResource> getAllResourcesTodo();
+ public abstract LocalResource getResourceTodo(String key);
+
+ public abstract List<String> getFsTokenList();
+ public abstract String getFsToken(int index);
+ public abstract int getFsTokenCount();
+
+ public abstract ByteBuffer getFsTokensTodo();
+
+ public abstract Map<String, String> getAllEnvironment();
+ public abstract String getEnvironment(String key);
+
+ public abstract List<String> getCommandList();
+ public abstract String getCommand(int index);
+ public abstract int getCommandCount();
+
+ public abstract String getQueue();
+ public abstract Priority getPriority();
+ public abstract String getUser();
+
+
+
+ public abstract void setApplicationId(ApplicationId applicationId);
+ public abstract void setApplicationName(String applicationName);
+ public abstract void setMasterCapability(Resource masterCapability);
+
+ public abstract void addAllResources(Map<String, URL> resources);
+ public abstract void setResource(String key, URL url);
+ public abstract void removeResource(String key);
+ public abstract void clearResources();
+
+ public abstract void addAllResourcesTodo(Map<String, LocalResource> resourcesTodo);
+ public abstract void setResourceTodo(String key, LocalResource localResource);
+ public abstract void removeResourceTodo(String key);
+ public abstract void clearResourcesTodo();
+
+ public abstract void addAllFsTokens(List<String> fsTokens);
+ public abstract void addFsToken(String fsToken);
+ public abstract void removeFsToken(int index);
+ public abstract void clearFsTokens();
+
+ public abstract void setFsTokensTodo(ByteBuffer fsTokensTodo);
+
+ public abstract void addAllEnvironment(Map<String, String> environment);
+ public abstract void setEnvironment(String key, String env);
+ public abstract void removeEnvironment(String key);
+ public abstract void clearEnvironment();
+
+ public abstract void addAllCommands(List<String> commands);
+ public abstract void addCommand(String command);
+ public abstract void removeCommand(int index);
+ public abstract void clearCommands();
+
+ public abstract void setQueue(String queue);
+ public abstract void setPriority(Priority priority);
+ public abstract void setUser(String user);
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
new file mode 100644
index 0000000..523400b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+
+public interface Container extends Comparable<Container> {
+ ContainerId getId();
+ NodeId getNodeId();
+ String getNodeHttpAddress();
+ Resource getResource();
+ ContainerState getState();
+ ContainerToken getContainerToken();
+ ContainerStatus getContainerStatus();
+
+ void setId(ContainerId id);
+ void setNodeId(NodeId nodeId);
+ void setNodeHttpAddress(String nodeHttpAddress);
+ void setResource(Resource resource);
+ void setState(ContainerState state);
+ void setContainerToken(ContainerToken containerToken);
+ void setContainerStatus(ContainerStatus containerStatus);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
new file mode 100644
index 0000000..73a8ff6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
@@ -0,0 +1,12 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface ContainerId extends Comparable<ContainerId> {
+ public abstract ApplicationAttemptId getAppAttemptId();
+ public abstract ApplicationId getAppId();
+ public abstract int getId();
+
+ public abstract void setAppAttemptId(ApplicationAttemptId atId);
+ public abstract void setAppId(ApplicationId appID);
+ public abstract void setId(int id);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java
new file mode 100644
index 0000000..f9a7a17
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java
@@ -0,0 +1,75 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+
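+/**
+ * Captures everything a node needs to launch a container: local resources,
+ * security tokens, service data, environment and commands.
+ */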
+public interface ContainerLaunchContext {
+ ContainerId getContainerId();
+ String getUser();
+ Resource getResource();
+
+ Map<String, LocalResource> getAllLocalResources();
+ LocalResource getLocalResource(String key);
+
+
+ ByteBuffer getContainerTokens();
+
+ Map<String, ByteBuffer> getAllServiceData();
+ ByteBuffer getServiceData(String key);
+
+ Map<String, String> getAllEnv();
+ String getEnv(String key);
+
+ List<String> getCommandList();
+ String getCommand(int index);
+ int getCommandCount();
+
+ void setContainerId(ContainerId containerId);
+ void setUser(String user);
+ void setResource(Resource resource);
+
+ void addAllLocalResources(Map<String, LocalResource> localResources);
+ void setLocalResource(String key, LocalResource value);
+ void removeLocalResource(String key);
+ void clearLocalResources();
+
+ void setContainerTokens(ByteBuffer containerToken);
+
+ void addAllServiceData(Map<String, ByteBuffer> serviceData);
+ void setServiceData(String key, ByteBuffer value);
+ void removeServiceData(String key);
+ void clearServiceData();
+
+ void addAllEnv(Map<String, String> env);
+ void setEnv(String key, String value);
+ void removeEnv(String key);
+ void clearEnv();
+
+ void addAllCommands(List<String> commands);
+ void addCommand(String command);
+ void removeCommand(int index);
+ void clearCommands();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
new file mode 100644
index 0000000..54b5185
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.records;
+
+public enum ContainerState {
+ NEW, RUNNING, COMPLETE
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
new file mode 100644
index 0000000..c52a47c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
@@ -0,0 +1,31 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+public interface ContainerStatus {
+ ContainerId getContainerId();
+ ContainerState getState();
+ String getExitStatus();
+ String getDiagnostics();
+
+ void setContainerId(ContainerId containerId);
+ void setState(ContainerState state);
+ void setExitStatus(String exitStatus);
+ void setDiagnostics(String diagnostics);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java
new file mode 100644
index 0000000..52290db
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import java.nio.ByteBuffer;
+
+public interface ContainerToken {
+ public abstract ByteBuffer getIdentifier();
+ public abstract ByteBuffer getPassword();
+ public abstract String getKind();
+ public abstract String getService();
+
+ public abstract void setIdentifier(ByteBuffer identifier);
+ public abstract void setPassword(ByteBuffer password);
+ public abstract void setKind(String kind);
+ public abstract void setService(String service);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java
new file mode 100644
index 0000000..e6c05e5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java
@@ -0,0 +1,15 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface LocalResource {
+ public abstract URL getResource();
+ public abstract long getSize();
+ public abstract long getTimestamp();
+ public abstract LocalResourceType getType();
+ public abstract LocalResourceVisibility getVisibility();
+
+ public abstract void setResource(URL resource);
+ public abstract void setSize(long size);
+ public abstract void setTimestamp(long timestamp);
+ public abstract void setType(LocalResourceType type);
+ public abstract void setVisibility(LocalResourceVisibility visibility);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java
new file mode 100644
index 0000000..5647ab6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.records;
+
+public enum LocalResourceType {
+ ARCHIVE, FILE
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java
new file mode 100644
index 0000000..9dd8b6c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.api.records;
+
+public enum LocalResourceVisibility {
+ PUBLIC, PRIVATE, APPLICATION
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java
new file mode 100644
index 0000000..01f5f60
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records;
+
+public interface NodeHealthStatus {
+
+ boolean getIsNodeHealthy();
+
+ String getHealthReport();
+
+ long getLastHealthReportTime();
+
+ void setIsNodeHealthy(boolean isNodeHealthy);
+
+ void setHealthReport(String healthReport);
+
+ void setLastHealthReportTime(long lastHealthReport);
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeId.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeId.java
new file mode 100644
index 0000000..f77e968
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeId.java
@@ -0,0 +1,10 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface NodeId extends Comparable<NodeId> {
+
+ String getHost();
+ void setHost(String host);
+
+ int getPort();
+ void setPort(int port);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
new file mode 100644
index 0000000..abe1b03
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface NodeReport {
+ NodeId getNodeId();
+ void setNodeId(NodeId nodeId);
+ String getHttpAddress();
+ void setHttpAddress(String httpAddress);
+ String getRackName();
+ void setRackName(String rackName);
+ Resource getUsed();
+ void setUsed(Resource used);
+ Resource getCapability();
+ void setCapability(Resource capability);
+ int getNumContainers();
+ void setNumContainers(int numContainers);
+ NodeHealthStatus getNodeHealthStatus();
+ void setNodeHealthStatus(NodeHealthStatus nodeHealthStatus);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java
new file mode 100644
index 0000000..65ade8a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface Priority extends Comparable<Priority> {
+
+ int getPriority();
+
+ void setPriority(int priority);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProtoBase.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProtoBase.java
new file mode 100644
index 0000000..9d14525
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProtoBase.java
@@ -0,0 +1,43 @@
+package org.apache.hadoop.yarn.api.records;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.yarn.util.ProtoUtils;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Message;
+
+public abstract class ProtoBase<T extends Message> {
+
+ public abstract T getProto();
+
+ //TODO Force a comparator?
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null)
+ return false;
+ if (this.getClass().isAssignableFrom(other.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " ");
+ }
+
+ protected final ByteBuffer convertFromProtoFormat(ByteString byteString) {
+ return ProtoUtils.convertFromProtoFormat(byteString);
+ }
+
+ protected final ByteString convertToProtoFormat(ByteBuffer byteBuffer) {
+ return ProtoUtils.convertToProtoFormat(byteBuffer);
+ }
+}
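For orientation, every concrete record implementation below follows the same skeleton on top of ProtoBase. A minimal sketch, assuming a hypothetical generated message FooProto (any YarnProtos message works the same way; FooProto is not part of this patch):

    // Illustrative only: FooProto stands in for a generated protobuf message.
    public class FooPBImpl extends ProtoBase<FooProto> {
      private FooProto proto = FooProto.getDefaultInstance();
      private FooProto.Builder builder = null;
      private boolean viaProto = false;           // true when proto is authoritative

      public FooPBImpl() { builder = FooProto.newBuilder(); }
      public FooPBImpl(FooProto proto) { this.proto = proto; viaProto = true; }

      @Override
      public FooProto getProto() {
        proto = viaProto ? proto : builder.build(); // freeze any pending edits
        viaProto = true;
        return proto;
      }
    }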
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java
new file mode 100644
index 0000000..07b6ab2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java
@@ -0,0 +1,7 @@
+package org.apache.hadoop.yarn.api.records;
+
+public enum QueueACL {
+ SUBMIT_JOB,
+ ADMINISTER_QUEUE,
+ ADMINISTER_JOBS; // currently unused
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
new file mode 100644
index 0000000..4a1487c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
@@ -0,0 +1,26 @@
+package org.apache.hadoop.yarn.api.records;
+
+import java.util.List;
+
+public interface QueueInfo {
+ String getQueueName();
+ void setQueueName(String queueName);
+
+ float getCapacity();
+ void setCapacity(float capacity);
+
+ float getMaximumCapacity();
+ void setMaximumCapacity(float maximumCapacity);
+
+ float getCurrentCapacity();
+ void setCurrentCapacity(float currentCapacity);
+
+ List<QueueInfo> getChildQueues();
+ void setChildQueues(List<QueueInfo> childQueues);
+
+ List<ApplicationReport> getApplications();
+ void setApplications(List<ApplicationReport> applications);
+
+ QueueState getQueueState();
+ void setQueueState(QueueState queueState);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java
new file mode 100644
index 0000000..feece4e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.api.records;
+
+/**
+ * State of a Queue
+ */
+public enum QueueState {
+ STOPPED,
+ RUNNING
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java
new file mode 100644
index 0000000..4f05f12
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java
@@ -0,0 +1,11 @@
+package org.apache.hadoop.yarn.api.records;
+
+import java.util.List;
+
+public interface QueueUserACLInfo {
+ String getQueueName();
+ void setQueueName(String queueName);
+
+ List<QueueACL> getUserAcls();
+ void setUserAcls(List<QueueACL> acls);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
new file mode 100644
index 0000000..487eafb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface Resource extends Comparable<Resource> {
+
+ int getMemory();
+
+ void setMemory(int memory);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
new file mode 100644
index 0000000..661fd2a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
@@ -0,0 +1,15 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface ResourceRequest extends Comparable<ResourceRequest> {
+
+ Priority getPriority();
+ String getHostName();
+ Resource getCapability();
+ int getNumContainers();
+
+ void setPriority(Priority priority);
+ void setHostName(String hostName);
+ void setCapability(Resource capability);
+ void setNumContainers(int numContainers);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java
new file mode 100644
index 0000000..d1a5ff7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java
@@ -0,0 +1,13 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface URL {
+ String getScheme();
+ String getHost();
+ int getPort();
+ String getFile();
+
+ void setScheme(String scheme);
+ void setHost(String host);
+ void setPort(int port);
+ void setFile(String file);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
new file mode 100644
index 0000000..6b6824e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.api.records;
+
+public interface YarnClusterMetrics {
+
+ int getNumNodeManagers();
+
+ void setNumNodeManagers(int numNodeManagers);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/AMResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/AMResponsePBImpl.java
new file mode 100644
index 0000000..d1fee0a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/AMResponsePBImpl.java
@@ -0,0 +1,287 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.proto.YarnProtos.AMResponseProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.AMResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+
+
+
+public class AMResponsePBImpl extends ProtoBase<AMResponseProto> implements AMResponse {
+ AMResponseProto proto = AMResponseProto.getDefaultInstance();
+ AMResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ Resource limit;
+
+ private List<Container> newContainersList = null;
+ private List<Container> finishedContainersList = null;
+// private boolean hasLocalContainerList = false;
+
+
+ public AMResponsePBImpl() {
+ builder = AMResponseProto.newBuilder();
+ }
+
+ public AMResponsePBImpl(AMResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public synchronized AMResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private synchronized void mergeLocalToBuilder() {
+ if (this.newContainersList != null) {
+ builder.clearNewContainers();
+ Iterable<ContainerProto> iterable = getProtoIterable(this.newContainersList);
+ builder.addAllNewContainers(iterable);
+ }
+ if (this.finishedContainersList != null) {
+ builder.clearFinishedContainers();
+ Iterable<ContainerProto> iterable = getProtoIterable(this.finishedContainersList);
+ builder.addAllFinishedContainers(iterable);
+ }
+ if (this.limit != null) {
+ builder.setLimit(convertToProtoFormat(this.limit));
+ }
+ }
+
+ private synchronized void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private synchronized void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = AMResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public synchronized boolean getReboot() {
+ AMResponseProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getReboot());
+ }
+
+ @Override
+ public synchronized void setReboot(boolean reboot) {
+ maybeInitBuilder();
+ builder.setReboot((reboot));
+ }
+ @Override
+ public synchronized int getResponseId() {
+ AMResponseProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getResponseId());
+ }
+
+ @Override
+ public synchronized void setResponseId(int responseId) {
+ maybeInitBuilder();
+ builder.setResponseId((responseId));
+ }
+ @Override
+ public synchronized Resource getAvailableResources() {
+ if (this.limit != null) {
+ return this.limit;
+ }
+
+ AMResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasLimit()) {
+ return null;
+ }
+ this.limit = convertFromProtoFormat(p.getLimit());
+ return this.limit;
+ }
+
+ @Override
+ public synchronized void setAvailableResources(Resource limit) {
+ maybeInitBuilder();
+ if (limit == null)
+ builder.clearLimit();
+ this.limit = limit;
+ }
+
+ @Override
+ public synchronized List<Container> getNewContainerList() {
+ initLocalNewContainerList();
+ return this.newContainersList;
+ }
+
+ @Override
+ public synchronized Container getNewContainer(int index) {
+ initLocalNewContainerList();
+ return this.newContainersList.get(index);
+ }
+ @Override
+ public synchronized int getNewContainerCount() {
+ initLocalNewContainerList();
+ return this.newContainersList.size();
+ }
+
+ // Once this is called, newContainersList will never be null - until getProto() is called.
+ private synchronized void initLocalNewContainerList() {
+ if (this.newContainersList != null) {
+ return;
+ }
+ AMResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<ContainerProto> list = p.getNewContainersList();
+ newContainersList = new ArrayList<Container>();
+
+ for (ContainerProto c : list) {
+ newContainersList.add(convertFromProtoFormat(c));
+ }
+ }
+
+ @Override
+ public synchronized void addAllNewContainers(final List<Container> containers) {
+ if (containers == null)
+ return;
+ initLocalNewContainerList();
+ newContainersList.addAll(containers);
+ }
+
+ private synchronized Iterable<ContainerProto> getProtoIterable(
+ final List<Container> newContainersList) {
+ maybeInitBuilder();
+ return new Iterable<ContainerProto>() {
+ @Override
+ public synchronized Iterator<ContainerProto> iterator() {
+ return new Iterator<ContainerProto>() {
+
+ Iterator<Container> iter = newContainersList.iterator();
+
+ @Override
+ public synchronized boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public synchronized ContainerProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public synchronized void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ }
+
+ @Override
+ public synchronized void addNewContainer(Container container) {
+ initLocalNewContainerList();
+ if (container == null)
+ return;
+ this.newContainersList.add(container);
+ }
+
+ @Override
+ public synchronized void removeNewContainer(int index) {
+ initLocalNewContainerList();
+ this.newContainersList.remove(index);
+ }
+ @Override
+ public synchronized void clearNewContainers() {
+ initLocalNewContainerList();
+ this.newContainersList.clear();
+ }
+
+ //// Finished containers
+ @Override
+ public synchronized List<Container> getFinishedContainerList() {
+ initLocalFinishedContainerList();
+ return this.finishedContainersList;
+ }
+
+ @Override
+ public synchronized Container getFinishedContainer(int index) {
+ initLocalFinishedContainerList();
+ return this.finishedContainersList.get(index);
+ }
+ @Override
+ public synchronized int getFinishedContainerCount() {
+ initLocalFinishedContainerList();
+ return this.finishedContainersList.size();
+ }
+
+ // Once this is called, finishedContainersList will never be null - until getProto() is called.
+ private synchronized void initLocalFinishedContainerList() {
+ if (this.finishedContainersList != null) {
+ return;
+ }
+ AMResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<ContainerProto> list = p.getFinishedContainersList();
+ finishedContainersList = new ArrayList<Container>();
+
+ for (ContainerProto c : list) {
+ finishedContainersList.add(convertFromProtoFormat(c));
+ }
+ }
+
+ @Override
+ public synchronized void addAllFinishedContainers(final List<Container> containers) {
+ if (containers == null)
+ return;
+ initLocalFinishedContainerList();
+ finishedContainersList.addAll(containers);
+ }
+
+ @Override
+ public synchronized void addFinishedContainer(Container container) {
+ initLocalFinishedContainerList();
+ if (container == null)
+ return;
+ this.finishedContainersList.add(container);
+ }
+
+ @Override
+ public synchronized void removeFinishedContainer(int index) {
+ initLocalFinishedContainerList();
+ this.finishedContainersList.remove(index);
+ }
+ @Override
+ public synchronized void clearFinishedContainers() {
+ initLocalFinishedContainerList();
+ this.finishedContainersList.clear();
+ }
+
+ private synchronized ContainerPBImpl convertFromProtoFormat(ContainerProto p) {
+ return new ContainerPBImpl(p);
+ }
+
+ private synchronized ContainerProto convertToProtoFormat(Container t) {
+ return ((ContainerPBImpl)t).getProto();
+ }
+
+ private synchronized ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
+ return new ResourcePBImpl(p);
+ }
+
+ private synchronized ResourceProto convertToProtoFormat(Resource r) {
+ return ((ResourcePBImpl) r).getProto();
+ }
+
+}
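Note the copy-on-read lifecycle above: the container lists are materialized from the proto on first access, mutated locally, and folded back into the builder only when getProto() is called; maybeInitBuilder() then re-seeds the builder from the frozen proto so the object stays mutable afterwards. A usage sketch (the list contents are an assumption; in practice they are ContainerPBImpl instances):

    List<Container> containers = new ArrayList<Container>(); // assumed populated elsewhere
    AMResponsePBImpl response = new AMResponsePBImpl();
    response.setResponseId(1);
    response.addAllNewContainers(containers);    // buffered only in the local list
    AMResponseProto wire = response.getProto();  // mergeLocalToProto() folds the list into the builder
    response.setReboot(true);                    // still valid: maybeInitBuilder() re-seeds from proto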
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
new file mode 100644
index 0000000..a0c5aec
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
@@ -0,0 +1,142 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import java.text.NumberFormat;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+
+public class ApplicationAttemptIdPBImpl extends ProtoBase<ApplicationAttemptIdProto> implements ApplicationAttemptId {
+ ApplicationAttemptIdProto proto = ApplicationAttemptIdProto.getDefaultInstance();
+ ApplicationAttemptIdProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationId applicationId = null;
+ protected static final NumberFormat idFormat = NumberFormat.getInstance();
+ static {
+ idFormat.setGroupingUsed(false);
+ idFormat.setMinimumIntegerDigits(4);
+ }
+
+ protected static final NumberFormat counterFormat = NumberFormat.getInstance();
+ static {
+ counterFormat.setGroupingUsed(false);
+ counterFormat.setMinimumIntegerDigits(6);
+ }
+
+
+ public ApplicationAttemptIdPBImpl() {
+ builder = ApplicationAttemptIdProto.newBuilder();
+ }
+
+ public ApplicationAttemptIdPBImpl(ApplicationAttemptIdProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public synchronized ApplicationAttemptIdProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private synchronized void mergeLocalToBuilder() {
+ if (this.applicationId != null && !((ApplicationIdPBImpl)applicationId).getProto().equals(builder.getApplicationId())) {
+ builder.setApplicationId(convertToProtoFormat(this.applicationId));
+ }
+ }
+
+ private synchronized void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private synchronized void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ApplicationAttemptIdProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public synchronized int getAttemptId() {
+ ApplicationAttemptIdProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getAttemptId());
+ }
+
+ @Override
+ public synchronized void setAttemptId(int attemptId) {
+ maybeInitBuilder();
+ builder.setAttemptId((attemptId));
+ }
+ @Override
+ public synchronized ApplicationId getApplicationId() {
+ ApplicationAttemptIdProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationId != null) {
+ return this.applicationId;
+ }
+ if (!p.hasApplicationId()) {
+ return null;
+ }
+ this.applicationId = convertFromProtoFormat(p.getApplicationId());
+ return this.applicationId;
+ }
+
+ @Override
+ public synchronized void setApplicationId(ApplicationId appId) {
+ maybeInitBuilder();
+ if (appId == null)
+ builder.clearApplicationId();
+ this.applicationId = appId;
+ }
+
+ private synchronized ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private synchronized ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+ @Override
+ public synchronized int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public synchronized boolean equals(Object other) {
+ if (other == null) return false;
+ if (this.getClass().isAssignableFrom(other.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public synchronized int compareTo(ApplicationAttemptId other) {
+ int compareAppIds = this.getApplicationId().compareTo(
+ other.getApplicationId());
+ if (compareAppIds == 0) {
+ return this.getAttemptId() - other.getAttemptId();
+ } else {
+ return compareAppIds;
+ }
+
+ }
+
+ @Override
+ public synchronized String toString() {
+ String id = (this.getApplicationId() != null) ? this.getApplicationId().getClusterTimestamp() + "_" +
+ idFormat.format(this.getApplicationId().getId()): "none";
+ return "appattempt_" + id + "_" + counterFormat.format(getAttemptId());
+ }
+}
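The two formatters give attempt IDs a fixed-width string form: the application id is zero-padded to four digits and the attempt counter to six. A sketch (the timestamp is an arbitrary example value):

    ApplicationIdPBImpl appId = new ApplicationIdPBImpl();
    appId.setClusterTimestamp(1315986086000L);
    appId.setId(1);
    ApplicationAttemptIdPBImpl attempt = new ApplicationAttemptIdPBImpl();
    attempt.setApplicationId(appId);
    attempt.setAttemptId(1);
    System.out.println(attempt); // appattempt_1315986086000_0001_000001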
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java
new file mode 100644
index 0000000..932e39c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java
@@ -0,0 +1,76 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProtoOrBuilder;
+
+
+
+public class ApplicationIdPBImpl extends ProtoBase<ApplicationIdProto> implements ApplicationId {
+ ApplicationIdProto proto = ApplicationIdProto.getDefaultInstance();
+ ApplicationIdProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public ApplicationIdPBImpl() {
+ builder = ApplicationIdProto.newBuilder();
+ }
+
+ public ApplicationIdPBImpl(ApplicationIdProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ApplicationIdProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ApplicationIdProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public int getId() {
+ ApplicationIdProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getId());
+ }
+
+ @Override
+ public void setId(int id) {
+ maybeInitBuilder();
+ builder.setId((id));
+ }
+ @Override
+ public long getClusterTimestamp() {
+ ApplicationIdProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getClusterTimestamp());
+ }
+
+ @Override
+ public void setClusterTimestamp(long clusterTimestamp) {
+ maybeInitBuilder();
+ builder.setClusterTimestamp((clusterTimestamp));
+ }
+
+ @Override
+ public int compareTo(ApplicationId other) {
+ if (this.getId() - other.getId() == 0) {
+ return this.getClusterTimestamp() > other.getClusterTimestamp() ? 1 :
+ this.getClusterTimestamp() < other.getClusterTimestamp() ? -1 : 0;
+ } else {
+ return this.getId() - other.getId();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "application_" + this.getClusterTimestamp() + "_" + this.getId();
+ }
+}
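Note that compareTo orders by the sequential id first and consults the cluster timestamp only to break ties, so ids from different cluster incarnations interleave by id. For example (values illustrative):

    ApplicationIdPBImpl a = new ApplicationIdPBImpl();
    a.setClusterTimestamp(2000L);
    a.setId(1);
    ApplicationIdPBImpl b = new ApplicationIdPBImpl();
    b.setClusterTimestamp(1000L);
    b.setId(2);
    assert a.compareTo(b) < 0; // id 1 sorts first despite the newer timestamp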
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationMasterPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationMasterPBImpl.java
new file mode 100644
index 0000000..0c7f1d3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationMasterPBImpl.java
@@ -0,0 +1,259 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationMasterProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationMasterProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationStateProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationStatusProto;
+import org.apache.hadoop.yarn.util.ProtoUtils;
+
+
+
+public class ApplicationMasterPBImpl extends ProtoBase<ApplicationMasterProto> implements ApplicationMaster {
+ ApplicationMasterProto proto = ApplicationMasterProto.getDefaultInstance();
+ ApplicationMasterProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationId applicationId = null;
+ private ApplicationStatus applicationStatus = null;
+
+
+ public ApplicationMasterPBImpl() {
+ builder = ApplicationMasterProto.newBuilder();
+ }
+
+ public ApplicationMasterPBImpl(ApplicationMasterProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ApplicationMasterProto getProto() {
+
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.applicationId != null && !((ApplicationIdPBImpl)this.applicationId).getProto().equals(builder.getApplicationId())) {
+ builder.setApplicationId(convertToProtoFormat(this.applicationId));
+ }
+
+ if (this.applicationStatus != null && !((ApplicationStatusPBImpl)this.applicationStatus).getProto().equals(builder.getStatus())) {
+ builder.setStatus(convertToProtoFormat(this.applicationStatus));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ApplicationMasterProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ApplicationState getState() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasState()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getState());
+ }
+
+ @Override
+ public void setState(ApplicationState state) {
+ maybeInitBuilder();
+ if (state == null) {
+ builder.clearState();
+ return;
+ }
+ builder.setState(convertToProtoFormat(state));
+ }
+ @Override
+ public String getHost() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasHost()) {
+ return null;
+ }
+ return (p.getHost());
+ }
+
+ @Override
+ public void setHost(String host) {
+ maybeInitBuilder();
+ if (host == null) {
+ builder.clearHost();
+ return;
+ }
+ builder.setHost((host));
+ }
+
+ @Override
+ public ApplicationId getApplicationId() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationId != null) {
+ return applicationId;
+ } // Else via proto
+ if (!p.hasApplicationId()) {
+ return null;
+ }
+ applicationId = convertFromProtoFormat(p.getApplicationId());
+
+ return applicationId;
+ }
+
+ @Override
+ public void setApplicationId(ApplicationId applicationId) {
+ maybeInitBuilder();
+ if (applicationId == null)
+ builder.clearApplicationId();
+ this.applicationId = applicationId;
+
+ }
+ @Override
+ public int getRpcPort() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getRpcPort());
+ }
+
+ @Override
+ public void setRpcPort(int rpcPort) {
+ maybeInitBuilder();
+ builder.setRpcPort((rpcPort));
+ }
+ @Override
+ public String getTrackingUrl() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasTrackingUrl()) {
+ return null;
+ }
+ return p.getTrackingUrl();
+ }
+
+ @Override
+ public void setTrackingUrl(String url) {
+ maybeInitBuilder();
+ if (url == null) {
+ builder.clearTrackingUrl();
+ return;
+ }
+ builder.setTrackingUrl(url);
+ }
+ @Override
+ public ApplicationStatus getStatus() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationStatus != null) {
+ return this.applicationStatus;
+ } // Else via proto
+ if (!p.hasStatus()) {
+ return null;
+ }
+ this.applicationStatus = convertFromProtoFormat(p.getStatus());
+
+ return this.applicationStatus;
+ }
+
+ @Override
+ public void setStatus(ApplicationStatus status) {
+ maybeInitBuilder();
+ if (status == null)
+ builder.clearStatus();
+ this.applicationStatus = status;
+
+ }
+ @Override
+ public String getClientToken() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasClientToken()) {
+ return null;
+ }
+ return (p.getClientToken());
+ }
+
+ @Override
+ public void setClientToken(String clientToken) {
+ maybeInitBuilder();
+ if (clientToken == null) {
+ builder.clearClientToken();
+ return;
+ }
+ builder.setClientToken((clientToken));
+ }
+
+ @Override
+ public int getAMFailCount() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getAmFailCount());
+ }
+
+ @Override
+ public int getContainerCount() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getContainerCount());
+ }
+
+ @Override
+ public void setAMFailCount(int amFailCount) {
+ maybeInitBuilder();
+ builder.setAmFailCount(amFailCount);
+ }
+
+ @Override
+ public void setContainerCount(int containerCount) {
+ maybeInitBuilder();
+ builder.setContainerCount(containerCount);
+ }
+
+ @Override
+ public String getDiagnostics() {
+ ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getDiagnostics();
+ }
+
+ @Override
+ public void setDiagnostics(String diagnostics) {
+ maybeInitBuilder();
+ if (diagnostics == null) {
+ builder.clearDiagnostics();
+ return;
+ }
+ builder.setDiagnostics(diagnostics);
+ }
+
+ private ApplicationStateProto convertToProtoFormat(ApplicationState e) {
+ return ProtoUtils.convertToProtoFormat(e);
+ }
+
+ private ApplicationState convertFromProtoFormat(ApplicationStateProto e) {
+ return ProtoUtils.convertFromProtoFormat(e);
+ }
+
+ private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+ private ApplicationStatusPBImpl convertFromProtoFormat(ApplicationStatusProto p) {
+ return new ApplicationStatusPBImpl(p);
+ }
+
+ private ApplicationStatusProto convertToProtoFormat(ApplicationStatus t) {
+ return ((ApplicationStatusPBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
new file mode 100644
index 0000000..b1a63ee
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
@@ -0,0 +1,264 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationStateProto;
+import org.apache.hadoop.yarn.util.ProtoUtils;
+
+public class ApplicationReportPBImpl extends ProtoBase<ApplicationReportProto>
+implements ApplicationReport {
+ ApplicationReportProto proto = ApplicationReportProto.getDefaultInstance();
+ ApplicationReportProto.Builder builder = null;
+ boolean viaProto = false;
+
+ ApplicationId applicationId;
+
+ public ApplicationReportPBImpl() {
+ builder = ApplicationReportProto.newBuilder();
+ }
+
+ public ApplicationReportPBImpl(ApplicationReportProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public ApplicationId getApplicationId() {
+ if (this.applicationId != null) {
+ return this.applicationId;
+ }
+
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasApplicationId()) {
+ return null;
+ }
+ this.applicationId = convertFromProtoFormat(p.getApplicationId());
+ return this.applicationId;
+ }
+
+ @Override
+ public String getTrackingUrl() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasTrackingUrl()) {
+ return null;
+ }
+ return p.getTrackingUrl();
+ }
+
+ @Override
+ public String getName() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasName()) {
+ return null;
+ }
+ return p.getName();
+ }
+
+ @Override
+ public String getQueue() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasQueue()) {
+ return null;
+ }
+ return p.getQueue();
+ }
+
+ @Override
+ public ApplicationState getState() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasState()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getState());
+ }
+
+ @Override
+ public String getHost() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasHost()) {
+ return null;
+ }
+ return (p.getHost());
+ }
+
+ @Override
+ public int getRpcPort() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getRpcPort());
+ }
+
+ @Override
+ public String getClientToken() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasClientToken()) {
+ return null;
+ }
+ return (p.getClientToken());
+ }
+
+ @Override
+ public String getUser() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasUser()) {
+ return null;
+ }
+ return p.getUser();
+ }
+
+
+ @Override
+ public String getDiagnostics() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasDiagnostics()) {
+ return null;
+ }
+ return p.getDiagnostics();
+ }
+
+ @Override
+ public void setApplicationId(ApplicationId applicationId) {
+ maybeInitBuilder();
+ if (applicationId == null)
+ builder.clearApplicationId();
+ this.applicationId = applicationId;
+ }
+
+ @Override
+ public void setTrackingUrl(String url) {
+ maybeInitBuilder();
+ if (url == null) {
+ builder.clearTrackingUrl();
+ return;
+ }
+ builder.setTrackingUrl(url);
+ }
+
+ @Override
+ public void setName(String name) {
+ maybeInitBuilder();
+ if (name == null) {
+ builder.clearName();
+ return;
+ }
+ builder.setName(name);
+ }
+
+ @Override
+ public void setQueue(String queue) {
+ maybeInitBuilder();
+ if (queue == null) {
+ builder.clearQueue();
+ return;
+ }
+ builder.setQueue(queue);
+ }
+
+ @Override
+ public void setState(ApplicationState state) {
+ maybeInitBuilder();
+ if (state == null) {
+ builder.clearState();
+ return;
+ }
+ builder.setState(convertToProtoFormat(state));
+ }
+
+ @Override
+ public void setHost(String host) {
+ maybeInitBuilder();
+ if (host == null) {
+ builder.clearHost();
+ return;
+ }
+ builder.setHost((host));
+ }
+
+ @Override
+ public void setRpcPort(int rpcPort) {
+ maybeInitBuilder();
+ builder.setRpcPort((rpcPort));
+ }
+
+ @Override
+ public void setClientToken(String clientToken) {
+ maybeInitBuilder();
+ if (clientToken == null) {
+ builder.clearClientToken();
+ return;
+ }
+ builder.setClientToken((clientToken));
+ }
+
+ @Override
+ public void setUser(String user) {
+ maybeInitBuilder();
+ if (user == null) {
+ builder.clearUser();
+ return;
+ }
+ builder.setUser((user));
+ }
+
+ @Override
+ public void setDiagnostics(String diagnostics) {
+ maybeInitBuilder();
+ if (diagnostics == null) {
+ builder.clearDiagnostics();
+ return;
+ }
+ builder.setDiagnostics(diagnostics);
+ }
+
+ @Override
+ public ApplicationReportProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.applicationId != null
+ && !((ApplicationIdPBImpl) this.applicationId).getProto().equals(
+ builder.getApplicationId())) {
+ builder.setApplicationId(convertToProtoFormat(this.applicationId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ApplicationReportProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl) t).getProto();
+ }
+
+ private ApplicationState convertFromProtoFormat(ApplicationStateProto s) {
+ return ProtoUtils.convertFromProtoFormat(s);
+ }
+
+ private ApplicationStateProto convertToProtoFormat(ApplicationState s) {
+ return ProtoUtils.convertToProtoFormat(s);
+ }
+
+ private ApplicationIdPBImpl convertFromProtoFormat(
+ ApplicationIdProto applicationId) {
+ return new ApplicationIdPBImpl(applicationId);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationStatusPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationStatusPBImpl.java
new file mode 100644
index 0000000..114711f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationStatusPBImpl.java
@@ -0,0 +1,116 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationStatusProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationStatusProtoOrBuilder;
+
+
+
+public class ApplicationStatusPBImpl extends ProtoBase<ApplicationStatusProto> implements ApplicationStatus {
+ ApplicationStatusProto proto = ApplicationStatusProto.getDefaultInstance();
+ ApplicationStatusProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationAttemptId applicationAttemptId = null;
+
+
+ public ApplicationStatusPBImpl() {
+ builder = ApplicationStatusProto.newBuilder();
+ }
+
+ public ApplicationStatusPBImpl(ApplicationStatusProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ApplicationStatusProto getProto() {
+
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.applicationAttemptId != null && !((ApplicationAttemptIdPBImpl)this.applicationAttemptId).getProto().equals(builder.getApplicationAttemptId())) {
+ builder.setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ApplicationStatusProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public int getResponseId() {
+ ApplicationStatusProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getResponseId());
+ }
+
+ @Override
+ public void setResponseId(int responseId) {
+ maybeInitBuilder();
+ builder.setResponseId((responseId));
+ }
+
+ @Override
+ public ApplicationAttemptId getApplicationAttemptId() {
+ ApplicationStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationAttemptId != null) {
+ return this.applicationAttemptId;
+ }
+ if (!p.hasApplicationAttemptId()) {
+ return null;
+ }
+ this.applicationAttemptId = convertFromProtoFormat(p.getApplicationAttemptId());
+ return this.applicationAttemptId;
+ }
+
+ @Override
+ public void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
+ maybeInitBuilder();
+ if (applicationAttemptId == null)
+ builder.clearApplicationAttemptId();
+ this.applicationAttemptId = applicationAttemptId;
+ }
+
+ @Override
+ public float getProgress() {
+ ApplicationStatusProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getProgress());
+ }
+
+ @Override
+ public void setProgress(float progress) {
+ maybeInitBuilder();
+ builder.setProgress((progress));
+ }
+
+ private ApplicationAttemptIdPBImpl convertFromProtoFormat(ApplicationAttemptIdProto p) {
+ return new ApplicationAttemptIdPBImpl(p);
+ }
+
+ private ApplicationAttemptIdProto convertToProtoFormat(ApplicationAttemptId t) {
+ return ((ApplicationAttemptIdPBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
new file mode 100644
index 0000000..ae43a69
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
@@ -0,0 +1,642 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.StringLocalResourceMapProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.StringStringMapProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.StringURLMapProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.URLProto;
+
+
+
+public class ApplicationSubmissionContextPBImpl extends ProtoBase<ApplicationSubmissionContextProto> implements ApplicationSubmissionContext {
+ ApplicationSubmissionContextProto proto = ApplicationSubmissionContextProto.getDefaultInstance();
+ ApplicationSubmissionContextProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationId applicationId = null;
+ private Resource masterCapability = null;
+ private Map<String, URL> resources = null;
+ private Map<String, LocalResource> resourcesTodo = null;
+ private List<String> fsTokenList = null;
+ private ByteBuffer fsTokenTodo = null;
+ private Map<String, String> environment = null;
+ private List<String> commandList = null;
+ private Priority priority = null;
+
+
+
+ public ApplicationSubmissionContextPBImpl() {
+ builder = ApplicationSubmissionContextProto.newBuilder();
+ }
+
+ public ApplicationSubmissionContextPBImpl(ApplicationSubmissionContextProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ApplicationSubmissionContextProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.applicationId != null) {
+ builder.setApplicationId(convertToProtoFormat(this.applicationId));
+ }
+ if (this.masterCapability != null) {
+ builder.setMasterCapability(convertToProtoFormat(this.masterCapability));
+ }
+ if (this.resources != null) {
+ addResourcesToProto();
+ }
+ if (this.resourcesTodo != null) {
+ addResourcesTodoToProto();
+ }
+ if (this.fsTokenList != null) {
+ addFsTokenListToProto();
+ }
+ if (this.fsTokenTodo != null) {
+ builder.setFsTokensTodo(convertToProtoFormat(this.fsTokenTodo));
+ }
+ if (this.environment != null) {
+ addEnvironmentToProto();
+ }
+ if (this.commandList != null) {
+ addCommandsToProto();
+ }
+ if (this.priority != null) {
+ builder.setPriority(convertToProtoFormat(this.priority));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ApplicationSubmissionContextProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public Priority getPriority() {
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.priority != null) {
+ return this.priority;
+ }
+ if (!p.hasPriority()) {
+ return null;
+ }
+ this.priority = convertFromProtoFormat(p.getPriority());
+ return this.priority;
+ }
+
+ @Override
+ public void setPriority(Priority priority) {
+ maybeInitBuilder();
+ if (priority == null)
+ builder.clearPriority();
+ this.priority = priority;
+ }
+ @Override
+ public ApplicationId getApplicationId() {
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationId != null) {
+ return applicationId;
+ } // Else via proto
+ if (!p.hasApplicationId()) {
+ return null;
+ }
+ applicationId = convertFromProtoFormat(p.getApplicationId());
+ return applicationId;
+ }
+
+ @Override
+ public void setApplicationId(ApplicationId applicationId) {
+ maybeInitBuilder();
+ if (applicationId == null)
+ builder.clearApplicationId();
+ this.applicationId = applicationId;
+ }
+ @Override
+ public String getApplicationName() {
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasApplicationName()) {
+ return null;
+ }
+ return (p.getApplicationName());
+ }
+
+ @Override
+ public void setApplicationName(String applicationName) {
+ maybeInitBuilder();
+ if (applicationName == null) {
+ builder.clearApplicationName();
+ return;
+ }
+ builder.setApplicationName((applicationName));
+ }
+ @Override
+ public Resource getMasterCapability() {
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.masterCapability != null) {
+ return masterCapability;
+ } // Else via proto
+ if (!p.hasMasterCapability()) {
+ return null;
+ }
+ masterCapability = convertFromProtoFormat(p.getMasterCapability());
+ return this.masterCapability;
+ }
+
+ @Override
+ public void setMasterCapability(Resource masterCapability) {
+ maybeInitBuilder();
+ if (masterCapability == null)
+ builder.clearMasterCapability();
+ this.masterCapability = masterCapability;
+ }
+ @Override
+ public Map<String, URL> getAllResources() {
+ initResources();
+ return this.resources;
+ }
+ @Override
+ public URL getResource(String key) {
+ initResources();
+ return this.resources.get(key);
+ }
+
+ private void initResources() {
+ if (this.resources != null) {
+ return;
+ }
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ List<StringURLMapProto> mapAsList = p.getResourcesList();
+ this.resources = new HashMap<String, URL>();
+
+ for (StringURLMapProto c : mapAsList) {
+ this.resources.put(c.getKey(), convertFromProtoFormat(c.getValue()));
+ }
+ }
+
+ @Override
+ public void addAllResources(final Map<String, URL> resources) {
+ if (resources == null)
+ return;
+ initResources();
+ this.resources.putAll(resources);
+ }
+
+ private void addResourcesToProto() {
+ maybeInitBuilder();
+ builder.clearResources();
+ if (this.resources == null)
+ return;
+ Iterable<StringURLMapProto> iterable = new Iterable<StringURLMapProto>() {
+
+ @Override
+ public Iterator<StringURLMapProto> iterator() {
+ return new Iterator<StringURLMapProto>() {
+
+ Iterator<String> keyIter = resources.keySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public StringURLMapProto next() {
+ String key = keyIter.next();
+ return StringURLMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(resources.get(key))).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return keyIter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllResources(iterable);
+ }
+ @Override
+ public void setResource(String key, URL val) {
+ initResources();
+ this.resources.put(key, val);
+ }
+ @Override
+ public void removeResource(String key) {
+ initResources();
+ this.resources.remove(key);
+ }
+ @Override
+ public void clearResources() {
+ initResources();
+ this.resources.clear();
+ }
+ @Override
+ public Map<String, LocalResource> getAllResourcesTodo() {
+ initResourcesTodo();
+ return this.resourcesTodo;
+ }
+ @Override
+ public LocalResource getResourceTodo(String key) {
+ initResourcesTodo();
+ return this.resourcesTodo.get(key);
+ }
+
+ private void initResourcesTodo() {
+ if (this.resourcesTodo != null) {
+ return;
+ }
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ List<StringLocalResourceMapProto> mapAsList = p.getResourcesTodoList();
+ this.resourcesTodo = new HashMap<String, LocalResource>();
+
+ for (StringLocalResourceMapProto c : mapAsList) {
+ this.resourcesTodo.put(c.getKey(), convertFromProtoFormat(c.getValue()));
+ }
+ }
+
+ @Override
+ public void addAllResourcesTodo(final Map<String, LocalResource> resourcesTodo) {
+ if (resourcesTodo == null)
+ return;
+ initResourcesTodo();
+ this.resourcesTodo.putAll(resourcesTodo);
+ }
+
+ private void addResourcesTodoToProto() {
+ maybeInitBuilder();
+ builder.clearResourcesTodo();
+ if (resourcesTodo == null)
+ return;
+ Iterable<StringLocalResourceMapProto> iterable = new Iterable<StringLocalResourceMapProto>() {
+
+ @Override
+ public Iterator<StringLocalResourceMapProto> iterator() {
+ return new Iterator<StringLocalResourceMapProto>() {
+
+ Iterator<String> keyIter = resourcesTodo.keySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public StringLocalResourceMapProto next() {
+ String key = keyIter.next();
+ return StringLocalResourceMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(resourcesTodo.get(key))).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return keyIter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllResourcesTodo(iterable);
+ }
+ @Override
+ public void setResourceTodo(String key, LocalResource val) {
+ initResourcesTodo();
+ this.resourcesTodo.put(key, val);
+ }
+ @Override
+ public void removeResourceTodo(String key) {
+ initResourcesTodo();
+ this.resourcesTodo.remove(key);
+ }
+ @Override
+ public void clearResourcesTodo() {
+ initResourcesTodo();
+ this.resourcesTodo.clear();
+ }
+ @Override
+ public List<String> getFsTokenList() {
+ initFsTokenList();
+ return this.fsTokenList;
+ }
+ @Override
+ public String getFsToken(int index) {
+ initFsTokenList();
+ return this.fsTokenList.get(index);
+ }
+ @Override
+ public int getFsTokenCount() {
+ initFsTokenList();
+ return this.fsTokenList.size();
+ }
+
+ private void initFsTokenList() {
+ if (this.fsTokenList != null) {
+ return;
+ }
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ List<String> list = p.getFsTokensList();
+ this.fsTokenList = new ArrayList<String>();
+
+ for (String c : list) {
+ this.fsTokenList.add(c);
+ }
+ }
+
+ @Override
+ public void addAllFsTokens(final List<String> fsTokens) {
+ if (fsTokens == null)
+ return;
+ initFsTokenList();
+ this.fsTokenList.addAll(fsTokens);
+ }
+
+ private void addFsTokenListToProto() {
+ maybeInitBuilder();
+ builder.clearFsTokens();
+ builder.addAllFsTokens(this.fsTokenList);
+ }
+
+ @Override
+ public void addFsToken(String fsToken) {
+ initFsTokenList();
+ this.fsTokenList.add(fsToken);
+ }
+ @Override
+ public void removeFsToken(int index) {
+ initFsTokenList();
+ this.fsTokenList.remove(index);
+ }
+ @Override
+ public void clearFsTokens() {
+ initFsTokenList();
+ this.fsTokenList.clear();
+ }
+ @Override
+ public ByteBuffer getFsTokensTodo() {
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.fsTokenTodo != null) {
+ return this.fsTokenTodo;
+ }
+ if (!p.hasFsTokensTodo()) {
+ return null;
+ }
+ this.fsTokenTodo = convertFromProtoFormat(p.getFsTokensTodo());
+ return this.fsTokenTodo;
+ }
+
+ @Override
+ public void setFsTokensTodo(ByteBuffer fsTokensTodo) {
+ maybeInitBuilder();
+ if (fsTokensTodo == null)
+ builder.clearFsTokensTodo();
+ this.fsTokenTodo = fsTokensTodo;
+ }
+ @Override
+ public Map<String, String> getAllEnvironment() {
+ initEnvironment();
+ return this.environment;
+ }
+ @Override
+ public String getEnvironment(String key) {
+ initEnvironment();
+ return this.environment.get(key);
+ }
+
+ private void initEnvironment() {
+ if (this.environment != null) {
+ return;
+ }
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ List<StringStringMapProto> mapAsList = p.getEnvironmentList();
+ this.environment = new HashMap<String, String>();
+
+ for (StringStringMapProto c : mapAsList) {
+ this.environment.put(c.getKey(), c.getValue());
+ }
+ }
+
+ @Override
+ public void addAllEnvironment(Map<String, String> environment) {
+ if (environment == null)
+ return;
+ initEnvironment();
+ this.environment.putAll(environment);
+ }
+
+ private void addEnvironmentToProto() {
+ maybeInitBuilder();
+ builder.clearEnvironment();
+ if (environment == null)
+ return;
+ Iterable<StringStringMapProto> iterable = new Iterable<StringStringMapProto>() {
+
+ @Override
+ public Iterator<StringStringMapProto> iterator() {
+ return new Iterator<StringStringMapProto>() {
+
+ Iterator<String> keyIter = environment.keySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public StringStringMapProto next() {
+ String key = keyIter.next();
+ return StringStringMapProto.newBuilder().setKey(key).setValue((environment.get(key))).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return keyIter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllEnvironment(iterable);
+ }
+ @Override
+ public void setEnvironment(String key, String val) {
+ initEnvironment();
+ this.environment.put(key, val);
+ }
+ @Override
+ public void removeEnvironment(String key) {
+ initEnvironment();
+ this.environment.remove(key);
+ }
+ @Override
+ public void clearEnvironment() {
+ initEnvironment();
+ this.environment.clear();
+ }
+ @Override
+ public List<String> getCommandList() {
+ initCommandList();
+ return this.commandList;
+ }
+ @Override
+ public String getCommand(int index) {
+ initCommandList();
+ return this.commandList.get(index);
+ }
+ @Override
+ public int getCommandCount() {
+ initCommandList();
+ return this.commandList.size();
+ }
+
+ private void initCommandList() {
+ if (this.commandList != null) {
+ return;
+ }
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ List<String> list = p.getCommandList();
+ this.commandList = new ArrayList<String>();
+
+ for (String c : list) {
+ this.commandList.add(c);
+ }
+ }
+
+ @Override
+ public void addAllCommands(final List<String> command) {
+ if (command == null)
+ return;
+ initCommandList();
+ this.commandList.addAll(command);
+ }
+
+ private void addCommandsToProto() {
+ maybeInitBuilder();
+ builder.clearCommand();
+ if (this.commandList == null)
+ return;
+ builder.addAllCommand(this.commandList);
+ }
+ @Override
+ public void addCommand(String command) {
+ initCommandList();
+ this.commandList.add(command);
+ }
+ @Override
+ public void removeCommand(int index) {
+ initCommandList();
+ this.commandList.remove(index);
+ }
+ @Override
+ public void clearCommands() {
+ initCommandList();
+ this.commandList.clear();
+ }
+ @Override
+ public String getQueue() {
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasQueue()) {
+ return null;
+ }
+ return (p.getQueue());
+ }
+
+ @Override
+ public void setQueue(String queue) {
+ maybeInitBuilder();
+ if (queue == null) {
+ builder.clearQueue();
+ return;
+ }
+ builder.setQueue((queue));
+ }
+ @Override
+ public String getUser() {
+ ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasUser()) {
+ return null;
+ }
+ return (p.getUser());
+ }
+
+ @Override
+ public void setUser(String user) {
+ maybeInitBuilder();
+ if (user == null) {
+ builder.clearUser();
+ return;
+ }
+ builder.setUser((user));
+ }
+
+ private PriorityPBImpl convertFromProtoFormat(PriorityProto p) {
+ return new PriorityPBImpl(p);
+ }
+
+ private PriorityProto convertToProtoFormat(Priority t) {
+ return ((PriorityPBImpl)t).getProto();
+ }
+
+ private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+ private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
+ return new ResourcePBImpl(p);
+ }
+
+ private ResourceProto convertToProtoFormat(Resource t) {
+ return ((ResourcePBImpl)t).getProto();
+ }
+
+ private URLPBImpl convertFromProtoFormat(URLProto p) {
+ return new URLPBImpl(p);
+ }
+
+ private URLProto convertToProtoFormat(URL t) {
+ return ((URLPBImpl)t).getProto();
+ }
+
+ private LocalResourcePBImpl convertFromProtoFormat(LocalResourceProto p) {
+ return new LocalResourcePBImpl(p);
+ }
+
+ private LocalResourceProto convertToProtoFormat(LocalResource t) {
+ return ((LocalResourcePBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java
new file mode 100644
index 0000000..ebff16c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java
@@ -0,0 +1,179 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import java.text.NumberFormat;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProtoOrBuilder;
+
+
+
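+/**
+* Protocol-buffer backed implementation of ContainerId. Reads are served from
+* the immutable proto (or the builder while mutating); writes go through the
+* builder and the cached record objects, which getProto() merges back in.
+*/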
+public class ContainerIdPBImpl extends ProtoBase<ContainerIdProto>
+ implements ContainerId {
+ ContainerIdProto proto = ContainerIdProto.getDefaultInstance();
+ ContainerIdProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationId applicationId = null;
+ private ApplicationAttemptId appAttemptId = null;
+ protected static final NumberFormat idFormat = NumberFormat.getInstance();
+ static {
+ idFormat.setGroupingUsed(false);
+ idFormat.setMinimumIntegerDigits(4);
+ }
+
+ protected static final NumberFormat counterFormat = NumberFormat.getInstance();
+ static {
+ counterFormat.setGroupingUsed(false);
+ counterFormat.setMinimumIntegerDigits(6);
+ }
+
+
+ public ContainerIdPBImpl() {
+ builder = ContainerIdProto.newBuilder();
+ }
+
+ public ContainerIdPBImpl(ContainerIdProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ContainerIdProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
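+ // Push locally cached record objects into the builder, but only when they
+ // differ from what the builder already holds, to avoid rebuilding protos
+ // needlessly.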
+ private void mergeLocalToBuilder() {
+ if (this.applicationId != null
+ && !((ApplicationIdPBImpl) applicationId).getProto().equals(
+ builder.getAppId())) {
+ builder.setAppId(convertToProtoFormat(this.applicationId));
+ }
+ if (this.appAttemptId != null
+ && !((ApplicationAttemptIdPBImpl) appAttemptId).getProto().equals(
+ builder.getAppAttemptId())) {
+ builder.setAppAttemptId(convertToProtoFormat(this.appAttemptId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
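+ // Ensure a mutable builder seeded from the current proto exists before any
+ // write; viaProto is flipped off so subsequent reads see the builder.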
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ContainerIdProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public int getId() {
+ ContainerIdProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getId());
+ }
+
+ @Override
+ public void setId(int id) {
+ maybeInitBuilder();
+ builder.setId((id));
+ }
+ @Override
+ public ApplicationId getAppId() {
+ ContainerIdProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.applicationId != null) {
+ return this.applicationId;
+ }
+ if (!p.hasAppId()) {
+ return null;
+ }
+ this.applicationId = convertFromProtoFormat(p.getAppId());
+ return this.applicationId;
+ }
+
+ @Override
+ public ApplicationAttemptId getAppAttemptId() {
+ ContainerIdProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.appAttemptId != null) {
+ return this.appAttemptId;
+ }
+ if (!p.hasAppAttemptId()) {
+ return null;
+ }
+ this.appAttemptId = convertFromProtoFormat(p.getAppAttemptId());
+ return this.appAttemptId;
+ }
+
+ @Override
+ public void setAppId(ApplicationId appId) {
+ maybeInitBuilder();
+ if (appId == null)
+ builder.clearAppId();
+ this.applicationId = appId;
+ }
+
+ @Override
+ public void setAppAttemptId(ApplicationAttemptId atId) {
+ maybeInitBuilder();
+ if (atId == null)
+ builder.clearAppAttemptId();
+ this.appAttemptId = atId;
+ }
+
+ private ApplicationAttemptIdPBImpl convertFromProtoFormat(ApplicationAttemptIdProto p) {
+ return new ApplicationAttemptIdPBImpl(p);
+ }
+
+ private ApplicationAttemptIdProto convertToProtoFormat(ApplicationAttemptId t) {
+ return ((ApplicationAttemptIdPBImpl)t).getProto();
+ }
+
+ private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (this.getClass().isInstance(other)) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public int compareTo(ContainerId other) {
+ if (this.getAppId().compareTo(other.getAppId()) == 0) {
+ return this.getId() - other.getId();
+ } else {
+ return this.getAppId().compareTo(other.getAppId());
+ }
+ }
+
+ @Override
+ public String toString() {
+ String id = (this.getAppId() != null) ? this.getAppId().getClusterTimestamp() + "_" +
+ idFormat.format(this.getAppId().getId()): "none";
+ return "containerid_" + id + "_" + counterFormat.format(getId());
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
new file mode 100644
index 0000000..07d1705
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
@@ -0,0 +1,515 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.StringBytesMapProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.StringLocalResourceMapProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.StringStringMapProto;
+
+
+
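+/**
+* Protocol-buffer backed implementation of ContainerLaunchContext. Like the
+* other *PBImpl records, collection-valued fields are copied out of the proto
+* on first access and merged back into the builder when getProto() is called.
+*
+* Illustrative use (names and values here are only an example):
+*
+* ContainerLaunchContext ctx = new ContainerLaunchContextPBImpl();
+* ctx.setUser("nobody");
+* ctx.addCommand("/bin/true");
+* ContainerLaunchContextProto p =
+* ((ContainerLaunchContextPBImpl) ctx).getProto();
+*/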
+public class ContainerLaunchContextPBImpl
+ extends ProtoBase<ContainerLaunchContextProto>
+ implements ContainerLaunchContext {
+ ContainerLaunchContextProto proto = ContainerLaunchContextProto.getDefaultInstance();
+ ContainerLaunchContextProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ContainerId containerId = null;
+ private Resource resource = null;
+ private Map<String, LocalResource> localResources = null;
+ private ByteBuffer containerTokens = null;
+ private Map<String, ByteBuffer> serviceData = null;
+ private Map<String, String> env = null;
+ private List<String> commands = null;
+
+
+ public ContainerLaunchContextPBImpl() {
+ builder = ContainerLaunchContextProto.newBuilder();
+ }
+
+ public ContainerLaunchContextPBImpl(ContainerLaunchContextProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ContainerLaunchContextProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
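+ // Flush every locally cached field back into the builder. Map- and
+ // list-valued fields clear and rewrite their repeated proto counterparts.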
+ private void mergeLocalToBuilder() {
+ if (this.containerId != null
+ && !((ContainerIdPBImpl) containerId).getProto().equals(
+ builder.getContainerId())) {
+ builder.setContainerId(convertToProtoFormat(this.containerId));
+ }
+ if (this.resource != null
+ && !((ResourcePBImpl) this.resource).getProto().equals(
+ builder.getResource())) {
+ builder.setResource(convertToProtoFormat(this.resource));
+ }
+ if (this.localResources != null) {
+ addLocalResourcesToProto();
+ }
+ if (this.containerTokens != null) {
+ builder.setContainerTokens(convertToProtoFormat(this.containerTokens));
+ }
+ if (this.serviceData != null) {
+ addServiceDataToProto();
+ }
+ if (this.env != null) {
+ addEnvToProto();
+ }
+ if (this.commands != null) {
+ addCommandsToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ContainerLaunchContextProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public Resource getResource() {
+ ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.resource != null) {
+ return this.resource;
+ }
+ if (!p.hasResource()) {
+ return null;
+ }
+ this.resource = convertFromProtoFormat(p.getResource());
+ return this.resource;
+ }
+
+ @Override
+ public void setResource(Resource resource) {
+ maybeInitBuilder();
+ if (resource == null)
+ builder.clearResource();
+ this.resource = resource;
+ }
+ @Override
+ public List<String> getCommandList() {
+ initCommands();
+ return this.commands;
+ }
+ @Override
+ public String getCommand(int index) {
+ initCommands();
+ return this.commands.get(index);
+ }
+ @Override
+ public int getCommandCount() {
+ initCommands();
+ return this.commands.size();
+ }
+
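+ // Commands are snapshotted from the proto once; addCommand() and
+ // clearCommands() then mutate only the local list.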
+ private void initCommands() {
+ if (this.commands != null) {
+ return;
+ }
+ ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder;
+ List<String> list = p.getCommandList();
+ this.commands = new ArrayList<String>();
+
+ for (String c : list) {
+ this.commands.add(c);
+ }
+ }
+
+ @Override
+ public void addAllCommands(final List<String> command) {
+ if (command == null)
+ return;
+ initCommands();
+ this.commands.addAll(command);
+ }
+
+ private void addCommandsToProto() {
+ maybeInitBuilder();
+ builder.clearCommand();
+ if (this.commands == null)
+ return;
+ builder.addAllCommand(this.commands);
+ }
+ @Override
+ public void addCommand(String command) {
+ initCommands();
+ this.commands.add(command);
+ }
+ @Override
+ public void removeCommand(int index) {
+ initCommands();
+ this.commands.remove(index);
+ }
+ @Override
+ public void clearCommands() {
+ initCommands();
+ this.commands.clear();
+ }
+ @Override
+ public String getUser() {
+ ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasUser()) {
+ return null;
+ }
+ return (p.getUser());
+ }
+
+ @Override
+ public void setUser(String user) {
+ maybeInitBuilder();
+ if (user == null) {
+ builder.clearUser();
+ return;
+ }
+ builder.setUser((user));
+ }
+ @Override
+ public ContainerId getContainerId() {
+ ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerId != null) {
+ return this.containerId;
+ }
+ if (!p.hasContainerId()) {
+ return null;
+ }
+ this.containerId = convertFromProtoFormat(p.getContainerId());
+ return this.containerId;
+ }
+
+ @Override
+ public void setContainerId(ContainerId containerId) {
+ maybeInitBuilder();
+ if (containerId == null)
+ builder.clearContainerId();
+ this.containerId = containerId;
+ }
+ @Override
+ public Map<String, LocalResource> getAllLocalResources() {
+ initLocalResources();
+ return this.localResources;
+ }
+ @Override
+ public LocalResource getLocalResource(String key) {
+ initLocalResources();
+ return this.localResources.get(key);
+ }
+
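+ // First access converts the repeated StringLocalResourceMapProto entries
+ // into a mutable map keyed by resource name.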
+ private void initLocalResources() {
+ if (this.localResources != null) {
+ return;
+ }
+ ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder;
+ List<StringLocalResourceMapProto> list = p.getLocalResourcesList();
+ this.localResources = new HashMap<String, LocalResource>();
+
+ for (StringLocalResourceMapProto c : list) {
+ this.localResources.put(c.getKey(), convertFromProtoFormat(c.getValue()));
+ }
+ }
+
+ @Override
+ public void addAllLocalResources(final Map<String, LocalResource> localResources) {
+ if (localResources == null)
+ return;
+ initLocalResources();
+ this.localResources.putAll(localResources);
+ }
+
+ private void addLocalResourcesToProto() {
+ maybeInitBuilder();
+ builder.clearLocalResources();
+ if (localResources == null)
+ return;
+ Iterable<StringLocalResourceMapProto> iterable = new Iterable<StringLocalResourceMapProto>() {
+
+ @Override
+ public Iterator<StringLocalResourceMapProto> iterator() {
+ return new Iterator<StringLocalResourceMapProto>() {
+
+ Iterator<String> keyIter = localResources.keySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public StringLocalResourceMapProto next() {
+ String key = keyIter.next();
+ return StringLocalResourceMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(localResources.get(key))).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return keyIter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllLocalResources(iterable);
+ }
+ @Override
+ public void setLocalResource(String key, LocalResource val) {
+ initLocalResources();
+ this.localResources.put(key, val);
+ }
+ @Override
+ public void removeLocalResource(String key) {
+ initLocalResources();
+ this.localResources.remove(key);
+ }
+ @Override
+ public void clearLocalResources() {
+ initLocalResources();
+ this.localResources.clear();
+ }
+ @Override
+ public ByteBuffer getContainerTokens() {
+ ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerTokens != null) {
+ return this.containerTokens;
+ }
+ if (!p.hasContainerTokens()) {
+ return null;
+ }
+ this.containerTokens = convertFromProtoFormat(p.getContainerTokens());
+ return this.containerTokens;
+ }
+
+ @Override
+ public void setContainerTokens(ByteBuffer containerTokens) {
+ maybeInitBuilder();
+ if (containerTokens == null)
+ builder.clearContainerTokens();
+ this.containerTokens = containerTokens;
+ }
+ @Override
+ public Map<String, ByteBuffer> getAllServiceData() {
+ initServiceData();
+ return this.serviceData;
+ }
+ @Override
+ public ByteBuffer getServiceData(String key) {
+ initServiceData();
+ return this.serviceData.get(key);
+ }
+
+ private void initServiceData() {
+ if (this.serviceData != null) {
+ return;
+ }
+ ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder;
+ List<StringBytesMapProto> list = p.getServiceDataList();
+ this.serviceData = new HashMap<String, ByteBuffer>();
+
+ for (StringBytesMapProto c : list) {
+ this.serviceData.put(c.getKey(), convertFromProtoFormat(c.getValue()));
+ }
+ }
+
+ @Override
+ public void addAllServiceData(final Map<String, ByteBuffer> serviceData) {
+ if (serviceData == null)
+ return;
+ initServiceData();
+ this.serviceData.putAll(serviceData);
+ }
+
+ private void addServiceDataToProto() {
+ maybeInitBuilder();
+ builder.clearServiceData();
+ if (serviceData == null)
+ return;
+ Iterable<StringBytesMapProto> iterable = new Iterable<StringBytesMapProto>() {
+
+ @Override
+ public Iterator<StringBytesMapProto> iterator() {
+ return new Iterator<StringBytesMapProto>() {
+
+ Iterator<String> keyIter = serviceData.keySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public StringBytesMapProto next() {
+ String key = keyIter.next();
+ return StringBytesMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(serviceData.get(key))).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return keyIter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllServiceData(iterable);
+ }
+ @Override
+ public void setServiceData(String key, ByteBuffer val) {
+ initServiceData();
+ this.serviceData.put(key, val);
+ }
+ @Override
+ public void removeServiceData(String key) {
+ initServiceData();
+ this.serviceData.remove(key);
+ }
+ @Override
+ public void clearServiceData() {
+ initServiceData();
+ this.serviceData.clear();
+ }
+ @Override
+ public Map<String, String> getAllEnv() {
+ initEnv();
+ return this.env;
+ }
+ @Override
+ public String getEnv(String key) {
+ initEnv();
+ return this.env.get(key);
+ }
+
+ private void initEnv() {
+ if (this.env != null) {
+ return;
+ }
+ ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder;
+ List<StringStringMapProto> list = p.getEnvList();
+ this.env = new HashMap<String, String>();
+
+ for (StringStringMapProto c : list) {
+ this.env.put(c.getKey(), c.getValue());
+ }
+ }
+
+ @Override
+ public void addAllEnv(final Map<String, String> env) {
+ if (env == null)
+ return;
+ initEnv();
+ this.env.putAll(env);
+ }
+
+ private void addEnvToProto() {
+ maybeInitBuilder();
+ builder.clearEnv();
+ if (env == null)
+ return;
+ Iterable<StringStringMapProto> iterable = new Iterable<StringStringMapProto>() {
+
+ @Override
+ public Iterator<StringStringMapProto> iterator() {
+ return new Iterator<StringStringMapProto>() {
+
+ Iterator<String> keyIter = env.keySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public StringStringMapProto next() {
+ String key = keyIter.next();
+ return StringStringMapProto.newBuilder().setKey(key).setValue((env.get(key))).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return keyIter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllEnv(iterable);
+ }
+ @Override
+ public void setEnv(String key, String val) {
+ initEnv();
+ this.env.put(key, val);
+ }
+ @Override
+ public void removeEnv(String key) {
+ initEnv();
+ this.env.remove(key);
+ }
+ @Override
+ public void clearEnv() {
+ initEnv();
+ this.env.clear();
+ }
+
+ private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
+ return new ResourcePBImpl(p);
+ }
+
+ private ResourceProto convertToProtoFormat(Resource t) {
+ return ((ResourcePBImpl)t).getProto();
+ }
+
+ private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+ return new ContainerIdPBImpl(p);
+ }
+
+ private ContainerIdProto convertToProtoFormat(ContainerId t) {
+ return ((ContainerIdPBImpl)t).getProto();
+ }
+
+ private LocalResourcePBImpl convertFromProtoFormat(LocalResourceProto p) {
+ return new LocalResourcePBImpl(p);
+ }
+
+ private LocalResourceProto convertToProtoFormat(LocalResource t) {
+ return ((LocalResourcePBImpl)t).getProto();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
new file mode 100644
index 0000000..388cad0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
@@ -0,0 +1,326 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTokenProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.util.ProtoUtils;
+
+
+
+public class ContainerPBImpl extends ProtoBase<ContainerProto> implements Container {
+
+ ContainerProto proto = ContainerProto.getDefaultInstance();
+ ContainerProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ContainerId containerId = null;
+ private NodeId nodeId = null;
+ private Resource resource = null;
+ private ContainerToken containerToken = null;
+ private ContainerStatus containerStatus = null;
+
+ public ContainerPBImpl() {
+ builder = ContainerProto.newBuilder();
+ }
+
+ public ContainerPBImpl(ContainerProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ContainerProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.containerId != null
+ && !((ContainerIdPBImpl) containerId).getProto().equals(
+ builder.getId())) {
+ builder.setId(convertToProtoFormat(this.containerId));
+ }
+ if (this.nodeId != null
+ && !((NodeIdPBImpl) nodeId).getProto().equals(
+ builder.getNodeId())) {
+ builder.setNodeId(convertToProtoFormat(this.nodeId));
+ }
+ if (this.resource != null
+ && !((ResourcePBImpl) this.resource).getProto().equals(
+ builder.getResource())) {
+ builder.setResource(convertToProtoFormat(this.resource));
+ }
+ if (this.containerToken != null
+ && !((ContainerTokenPBImpl) this.containerToken).getProto().equals(
+ builder.getContainerToken())) {
+ builder.setContainerToken(convertToProtoFormat(this.containerToken));
+ }
+ if (this.containerStatus != null
+ && !((ContainerStatusPBImpl) this.containerStatus).getProto().equals(
+ builder.getContainerStatus())) {
+ builder.setContainerStatus(convertToProtoFormat(this.containerStatus));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ContainerProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ContainerState getState() {
+ ContainerProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasState()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getState());
+ }
+
+ @Override
+ public void setState(ContainerState state) {
+ maybeInitBuilder();
+ if (state == null) {
+ builder.clearState();
+ return;
+ }
+ builder.setState(convertToProtoFormat(state));
+ }
+ @Override
+ public ContainerId getId() {
+ ContainerProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerId != null) {
+ return this.containerId;
+ }
+ if (!p.hasId()) {
+ return null;
+ }
+ this.containerId = convertFromProtoFormat(p.getId());
+ return this.containerId;
+ }
+
+ @Override
+ public void setNodeId(NodeId nodeId) {
+ maybeInitBuilder();
+ if (nodeId == null)
+ builder.clearNodeId();
+ this.nodeId = nodeId;
+ }
+
+ @Override
+ public NodeId getNodeId() {
+ ContainerProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.nodeId != null) {
+ return this.nodeId;
+ }
+ if (!p.hasNodeId()) {
+ return null;
+ }
+ this.nodeId = convertFromProtoFormat(p.getNodeId());
+ return this.nodeId;
+ }
+
+ @Override
+ public void setId(ContainerId id) {
+ maybeInitBuilder();
+ if (id == null)
+ builder.clearId();
+ this.containerId = id;
+ }
+
+ @Override
+ public String getNodeHttpAddress() {
+ ContainerProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasNodeHttpAddress()) {
+ return null;
+ }
+ return (p.getNodeHttpAddress());
+ }
+
+ @Override
+ public void setNodeHttpAddress(String nodeHttpAddress) {
+ maybeInitBuilder();
+ if (nodeHttpAddress == null) {
+ builder.clearNodeHttpAddress();
+ return;
+ }
+ builder.setNodeHttpAddress(nodeHttpAddress);
+ }
+
+ @Override
+ public Resource getResource() {
+ ContainerProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.resource != null) {
+ return this.resource;
+ }
+ if (!p.hasResource()) {
+ return null;
+ }
+ this.resource = convertFromProtoFormat(p.getResource());
+ return this.resource;
+ }
+
+ @Override
+ public void setResource(Resource resource) {
+ maybeInitBuilder();
+ if (resource == null)
+ builder.clearResource();
+ this.resource = resource;
+ }
+ @Override
+ public ContainerToken getContainerToken() {
+ ContainerProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerToken != null) {
+ return this.containerToken;
+ }
+ if (!p.hasContainerToken()) {
+ return null;
+ }
+ this.containerToken = convertFromProtoFormat(p.getContainerToken());
+ return this.containerToken;
+ }
+
+ @Override
+ public void setContainerToken(ContainerToken containerToken) {
+ maybeInitBuilder();
+ if (containerToken == null)
+ builder.clearContainerToken();
+ this.containerToken = containerToken;
+ }
+
+ @Override
+ public ContainerStatus getContainerStatus() {
+ ContainerProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerStatus != null) {
+ return this.containerStatus;
+ }
+ if (!p.hasContainerStatus()) {
+ return null;
+ }
+ this.containerStatus = convertFromProtoFormat(p.getContainerStatus());
+ return this.containerStatus;
+ }
+
+ @Override
+ public void setContainerStatus(ContainerStatus containerStatus) {
+ maybeInitBuilder();
+ if (containerStatus == null)
+ builder.clearContainerStatus();
+ this.containerStatus = containerStatus;
+ }
+
+ private ContainerStateProto convertToProtoFormat(ContainerState e) {
+ return ProtoUtils.convertToProtoFormat(e);
+ }
+
+ private ContainerState convertFromProtoFormat(ContainerStateProto e) {
+ return ProtoUtils.convertFromProtoFormat(e);
+ }
+
+ private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+ return new ContainerIdPBImpl(p);
+ }
+
+ private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
+ return new NodeIdPBImpl(p);
+ }
+
+ private ContainerIdProto convertToProtoFormat(ContainerId t) {
+ return ((ContainerIdPBImpl)t).getProto();
+ }
+
+ private NodeIdProto convertToProtoFormat(NodeId t) {
+ return ((NodeIdPBImpl)t).getProto();
+ }
+
+ private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
+ return new ResourcePBImpl(p);
+ }
+
+ private ResourceProto convertToProtoFormat(Resource t) {
+ return ((ResourcePBImpl)t).getProto();
+ }
+
+ private ContainerTokenPBImpl convertFromProtoFormat(ContainerTokenProto p) {
+ return new ContainerTokenPBImpl(p);
+ }
+
+ private ContainerTokenProto convertToProtoFormat(ContainerToken t) {
+ return ((ContainerTokenPBImpl)t).getProto();
+ }
+
+ private ContainerStatusPBImpl convertFromProtoFormat(ContainerStatusProto p) {
+ return new ContainerStatusPBImpl(p);
+ }
+
+ private ContainerStatusProto convertToProtoFormat(ContainerStatus t) {
+ return ((ContainerStatusPBImpl)t).getProto();
+ }
+
+ //TODO Also compare ContainerToken
+ @Override
+ public int compareTo(Container other) {
+ if (this.getId().compareTo(other.getId()) == 0) {
+ if (this.getNodeId().compareTo(other.getNodeId()) == 0) {
+ if (this.getResource().compareTo(other.getResource()) == 0) {
+ return this.getState().compareTo(other.getState());
+ } else {
+ return this.getResource().compareTo(other.getResource());
+ }
+ } else {
+ return this.getNodeId().compareTo(other.getNodeId());
+ }
+ } else {
+ return this.getId().compareTo(other.getId());
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
new file mode 100644
index 0000000..1c6ccc4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
@@ -0,0 +1,142 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProtoOrBuilder;
+import org.apache.hadoop.yarn.util.ProtoUtils;
+
+
+
+public class ContainerStatusPBImpl extends ProtoBase<ContainerStatusProto> implements ContainerStatus {
+ ContainerStatusProto proto = ContainerStatusProto.getDefaultInstance();
+ ContainerStatusProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ContainerId containerId = null;
+
+
+ public ContainerStatusPBImpl() {
+ builder = ContainerStatusProto.newBuilder();
+ }
+
+ public ContainerStatusPBImpl(ContainerStatusProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ContainerStatusProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (containerId != null) {
+ builder.setContainerId(convertToProtoFormat(this.containerId));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ContainerStatusProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ContainerState getState() {
+ ContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasState()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getState());
+ }
+
+ @Override
+ public void setState(ContainerState state) {
+ maybeInitBuilder();
+ if (state == null) {
+ builder.clearState();
+ return;
+ }
+ builder.setState(convertToProtoFormat(state));
+ }
+ @Override
+ public ContainerId getContainerId() {
+ ContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.containerId != null) {
+ return this.containerId;
+ }
+ if (!p.hasContainerId()) {
+ return null;
+ }
+ this.containerId = convertFromProtoFormat(p.getContainerId());
+ return this.containerId;
+ }
+
+ @Override
+ public void setContainerId(ContainerId containerId) {
+ maybeInitBuilder();
+ if (containerId == null)
+ builder.clearContainerId();
+ this.containerId = containerId;
+ }
+ @Override
+ public String getExitStatus() {
+ ContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getExitStatus());
+ }
+
+ @Override
+ public void setExitStatus(String exitStatus) {
+ maybeInitBuilder();
+ if (exitStatus == null) {
+ builder.clearExitStatus();
+ return;
+ }
+ builder.setExitStatus(exitStatus);
+ }
+
+ @Override
+ public String getDiagnostics() {
+ ContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getDiagnostics());
+ }
+
+ @Override
+ public void setDiagnostics(String diagnostics) {
+ maybeInitBuilder();
+ if (diagnostics == null) {
+ builder.clearDiagnostics();
+ return;
+ }
+ builder.setDiagnostics(diagnostics);
+ }
+
+ private ContainerStateProto convertToProtoFormat(ContainerState e) {
+ return ProtoUtils.convertToProtoFormat(e);
+ }
+
+ private ContainerState convertFromProtoFormat(ContainerStateProto e) {
+ return ProtoUtils.convertFromProtoFormat(e);
+ }
+
+ private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+ return new ContainerIdPBImpl(p);
+ }
+
+ private ContainerIdProto convertToProtoFormat(ContainerId t) {
+ return ((ContainerIdPBImpl)t).getProto();
+ }
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerTokenPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerTokenPBImpl.java
new file mode 100644
index 0000000..9cf37f6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerTokenPBImpl.java
@@ -0,0 +1,140 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTokenProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTokenProtoOrBuilder;
+
+
+
+public class ContainerTokenPBImpl extends ProtoBase<ContainerTokenProto> implements ContainerToken {
+ private ContainerTokenProto proto = ContainerTokenProto.getDefaultInstance();
+ private ContainerTokenProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ private ByteBuffer identifier;
+ private ByteBuffer password;
+
+
+ public ContainerTokenPBImpl() {
+ builder = ContainerTokenProto.newBuilder();
+ }
+
+ public ContainerTokenPBImpl(ContainerTokenProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
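+ // Unlike most record impls in this package, all accessors here are
+ // synchronized, presumably because a token instance may be shared between
+ // threads.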
+ public synchronized ContainerTokenProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private synchronized void mergeLocalToBuilder() {
+ if (this.identifier != null) {
+ builder.setIdentifier(convertToProtoFormat(this.identifier));
+ }
+ if (this.password != null) {
+ builder.setPassword(convertToProtoFormat(this.password));
+ }
+ }
+
+ private synchronized void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private synchronized void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ContainerTokenProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public synchronized ByteBuffer getIdentifier() {
+ ContainerTokenProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.identifier != null) {
+ return this.identifier;
+ }
+ if (!p.hasIdentifier()) {
+ return null;
+ }
+ this.identifier = convertFromProtoFormat(p.getIdentifier());
+ return this.identifier;
+ }
+
+ @Override
+ public synchronized void setIdentifier(ByteBuffer identifier) {
+ maybeInitBuilder();
+ if (identifier == null)
+ builder.clearIdentifier();
+ this.identifier = identifier;
+ }
+ @Override
+ public synchronized ByteBuffer getPassword() {
+ ContainerTokenProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.password != null) {
+ return this.password;
+ }
+ if (!p.hasPassword()) {
+ return null;
+ }
+ this.password = convertFromProtoFormat(p.getPassword());
+ return this.password;
+ }
+
+ @Override
+ public synchronized void setPassword(ByteBuffer password) {
+ maybeInitBuilder();
+ if (password == null)
+ builder.clearPassword();
+ this.password = password;
+ }
+ @Override
+ public synchronized String getKind() {
+ ContainerTokenProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasKind()) {
+ return null;
+ }
+ return (p.getKind());
+ }
+
+ @Override
+ public synchronized void setKind(String kind) {
+ maybeInitBuilder();
+ if (kind == null) {
+ builder.clearKind();
+ return;
+ }
+ builder.setKind((kind));
+ }
+ @Override
+ public synchronized String getService() {
+ ContainerTokenProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasService()) {
+ return null;
+ }
+ return (p.getService());
+ }
+
+ @Override
+ public synchronized void setService(String service) {
+ maybeInitBuilder();
+ if (service == null) {
+ builder.clearService();
+ return;
+ }
+ builder.setService((service));
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java
new file mode 100644
index 0000000..4dec36b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java
@@ -0,0 +1,186 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceTypeProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceVisibilityProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.URLProto;
+import org.apache.hadoop.yarn.util.ProtoUtils;
+
+
+
+public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implements LocalResource {
+ LocalResourceProto proto = LocalResourceProto.getDefaultInstance();
+ LocalResourceProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private URL url = null;
+
+
+ public LocalResourcePBImpl() {
+ builder = LocalResourceProto.newBuilder();
+ }
+
+ public LocalResourcePBImpl(LocalResourceProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public LocalResourceProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.url != null) {
+ builder.setResource(convertToProtoFormat(this.url));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = LocalResourceProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public long getSize() {
+ LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getSize());
+ }
+
+ @Override
+ public void setSize(long size) {
+ maybeInitBuilder();
+ builder.setSize((size));
+ }
+ @Override
+ public long getTimestamp() {
+ LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getTimestamp());
+ }
+
+ @Override
+ public void setTimestamp(long timestamp) {
+ maybeInitBuilder();
+ builder.setTimestamp((timestamp));
+ }
+ @Override
+ public LocalResourceType getType() {
+ LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasType()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getType());
+ }
+
+ @Override
+ public void setType(LocalResourceType type) {
+ maybeInitBuilder();
+ if (type == null) {
+ builder.clearType();
+ return;
+ }
+ builder.setType(convertToProtoFormat(type));
+ }
+ @Override
+ public URL getResource() {
+ LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.url != null) {
+ return this.url;
+ }
+ if (!p.hasResource()) {
+ return null;
+ }
+ this.url = convertFromProtoFormat(p.getResource());
+ return this.url;
+ }
+
+ @Override
+ public void setResource(URL resource) {
+ maybeInitBuilder();
+ if (resource == null)
+ builder.clearResource();
+ this.url = resource;
+ }
+ @Override
+ public LocalResourceVisibility getVisibility() {
+ LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasVisibility()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getVisibility());
+ }
+
+ @Override
+ public void setVisibility(LocalResourceVisibility visibility) {
+ maybeInitBuilder();
+ if (visibility == null) {
+ builder.clearVisibility();
+ return;
+ }
+ builder.setVisibility(convertToProtoFormat(visibility));
+ }
+
+ private LocalResourceTypeProto convertToProtoFormat(LocalResourceType e) {
+ return ProtoUtils.convertToProtoFormat(e);
+ }
+
+ private LocalResourceType convertFromProtoFormat(LocalResourceTypeProto e) {
+ return ProtoUtils.convertFromProtoFormat(e);
+ }
+
+ private URLPBImpl convertFromProtoFormat(URLProto p) {
+ return new URLPBImpl(p);
+ }
+
+ private URLProto convertToProtoFormat(URL t) {
+ return ((URLPBImpl)t).getProto();
+ }
+
+ private LocalResourceVisibilityProto convertToProtoFormat(LocalResourceVisibility e) {
+ return ProtoUtils.convertToProtoFormat(e);
+ }
+
+ private LocalResourceVisibility convertFromProtoFormat(LocalResourceVisibilityProto e) {
+ return ProtoUtils.convertFromProtoFormat(e);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeHealthStatusPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeHealthStatusPBImpl.java
new file mode 100644
index 0000000..5dfda97
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeHealthStatusPBImpl.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeHealthStatusProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeHealthStatusProtoOrBuilder;
+
+public class NodeHealthStatusPBImpl extends ProtoBase<NodeHealthStatusProto>
+ implements NodeHealthStatus {
+
+ private NodeHealthStatusProto.Builder builder;
+ private boolean viaProto = false;
+ private NodeHealthStatusProto proto = NodeHealthStatusProto
+ .getDefaultInstance();
+
+ public NodeHealthStatusPBImpl() {
+ this.builder = NodeHealthStatusProto.newBuilder();
+ }
+
+ public NodeHealthStatusPBImpl(NodeHealthStatusProto proto) {
+ this.proto = proto;
+ this.viaProto = true;
+ }
+
+ public NodeHealthStatusProto getProto() {
+ mergeLocalToProto();
+ this.proto = this.viaProto ? this.proto : this.builder.build();
+ this.viaProto = true;
+ return this.proto;
+ }
+
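+ // No record-typed fields are cached locally, so merging is just a build of
+ // the current builder state.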
+ private void mergeLocalToProto() {
+ if (this.viaProto)
+ maybeInitBuilder();
+ this.proto = this.builder.build();
+ this.viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (this.viaProto || this.builder == null) {
+ this.builder = NodeHealthStatusProto.newBuilder(this.proto);
+ }
+ this.viaProto = false;
+ }
+
+ @Override
+ public boolean getIsNodeHealthy() {
+ NodeHealthStatusProtoOrBuilder p =
+ this.viaProto ? this.proto : this.builder;
+ return p.getIsNodeHealthy();
+ }
+
+ @Override
+ public void setIsNodeHealthy(boolean isNodeHealthy) {
+ maybeInitBuilder();
+ this.builder.setIsNodeHealthy(isNodeHealthy);
+ }
+
+ @Override
+ public String getHealthReport() {
+ NodeHealthStatusProtoOrBuilder p =
+ this.viaProto ? this.proto : this.builder;
+ if (!p.hasHealthReport()) {
+ return null;
+ }
+ return (p.getHealthReport());
+ }
+
+ @Override
+ public void setHealthReport(String healthReport) {
+ maybeInitBuilder();
+ if (healthReport == null) {
+ this.builder.clearHealthReport();
+ return;
+ }
+ this.builder.setHealthReport((healthReport));
+ }
+
+ @Override
+ public long getLastHealthReportTime() {
+ NodeHealthStatusProtoOrBuilder p =
+ this.viaProto ? this.proto : this.builder;
+ return (p.getLastHealthReportTime());
+ }
+
+ @Override
+ public void setLastHealthReportTime(long lastHealthReport) {
+ maybeInitBuilder();
+ this.builder.setLastHealthReportTime((lastHealthReport));
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java
new file mode 100644
index 0000000..664bfb9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java
@@ -0,0 +1,112 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProtoOrBuilder;
+
+
+
+public class NodeIdPBImpl extends ProtoBase<NodeIdProto> implements NodeId {
+ NodeIdProto proto = NodeIdProto.getDefaultInstance();
+ NodeIdProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public NodeIdPBImpl() {
+ builder = NodeIdProto.newBuilder();
+ }
+
+ public NodeIdPBImpl(NodeIdProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public NodeIdProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
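+ // Host and port are written straight into the builder, so getProto() needs
+ // no merge step.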
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = NodeIdProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public String getHost() {
+ NodeIdProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getHost());
+ }
+
+ @Override
+ public void setHost(String host) {
+ maybeInitBuilder();
+ builder.setHost((host));
+ }
+
+ @Override
+ public int getPort() {
+ NodeIdProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getPort());
+ }
+
+ @Override
+ public void setPort(int port) {
+ maybeInitBuilder();
+ builder.setPort((port));
+ }
+
+ @Override
+ public String toString() {
+ return this.getHost() + ":" + this.getPort();
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = super.hashCode();
+ String host = this.getHost();
+ result = prime * result + ((host == null) ? 0 : host.hashCode());
+ result = prime * result + this.getPort();
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (!super.equals(obj))
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ NodeIdPBImpl other = (NodeIdPBImpl) obj;
+ String host = this.getHost();
+ String otherHost = other.getHost();
+ if (host == null) {
+ if (otherHost != null)
+ return false;
+ } else if (!host.equals(otherHost))
+ return false;
+ if (this.getPort() != other.getPort())
+ return false;
+ return true;
+ }
+
+ @Override
+ public int compareTo(NodeId other) {
+ int hostCompare = this.getHost().compareTo(other.getHost());
+ if (hostCompare == 0) {
+ if (this.getPort() > other.getPort()) {
+ return 1;
+ } else if (this.getPort() < other.getPort()) {
+ return -1;
+ }
+ return 0;
+ }
+ return hostCompare;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java
new file mode 100644
index 0000000..2af58f6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java
@@ -0,0 +1,240 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeHealthStatusProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+
+public class NodeReportPBImpl extends ProtoBase<NodeReportProto>
+ implements NodeReport {
+
+ private NodeReportProto proto = NodeReportProto.getDefaultInstance();
+ private NodeReportProto.Builder builder = null;
+ private boolean viaProto = false;
+ private NodeId nodeId;
+ private Resource used;
+ private Resource capability;
+ private NodeHealthStatus nodeHealthStatus;
+
+ public NodeReportPBImpl() {
+ builder = NodeReportProto.newBuilder();
+ }
+
+ public NodeReportPBImpl(NodeReportProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public Resource getCapability() {
+ if (this.capability != null) {
+ return this.capability;
+ }
+
+ NodeReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasCapability()) {
+ return null;
+ }
+ this.capability = convertFromProtoFormat(p.getCapability());
+ return this.capability;
+ }
+
+ @Override
+ public NodeHealthStatus getNodeHealthStatus() {
+ if (this.nodeHealthStatus != null) {
+ return this.nodeHealthStatus;
+ }
+
+ NodeReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasNodeHealthStatus()) {
+ return null;
+ }
+ this.nodeHealthStatus = convertFromProtoFormat(p.getNodeHealthStatus());
+ return this.nodeHealthStatus;
+ }
+
+ @Override
+ public String getHttpAddress() {
+ NodeReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasHttpAddress()) ? p.getHttpAddress() : null;
+ }
+
+ @Override
+ public int getNumContainers() {
+ NodeReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasNumContainers()) ? p.getNumContainers() : 0;
+ }
+
+ @Override
+ public String getRackName() {
+ NodeReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasRackName()) ? p.getRackName() : null;
+ }
+
+ @Override
+ public Resource getUsed() {
+ if (this.used != null) {
+ return this.used;
+ }
+
+ NodeReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasUsed()) {
+ return null;
+ }
+ this.used = convertFromProtoFormat(p.getUsed());
+ return this.used;
+ }
+
+ @Override
+ public NodeId getNodeId() {
+ if (this.nodeId != null) {
+ return this.nodeId;
+ }
+
+ NodeReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasNodeId()) {
+ return null;
+ }
+ this.nodeId = convertFromProtoFormat(p.getNodeId());
+ return this.nodeId;
+ }
+
+ @Override
+ public void setNodeId(NodeId nodeId) {
+ maybeInitBuilder();
+ if (nodeId == null)
+ builder.clearNodeId();
+ this.nodeId = nodeId;
+ }
+
+ @Override
+ public void setCapability(Resource capability) {
+ maybeInitBuilder();
+ if (capability == null)
+ builder.clearCapability();
+ this.capability = capability;
+ }
+
+ @Override
+ public void setNodeHealthStatus(NodeHealthStatus healthStatus) {
+ maybeInitBuilder();
+ if (healthStatus == null)
+ builder.clearNodeHealthStatus();
+ this.nodeHealthStatus = healthStatus;
+ }
+
+ @Override
+ public void setHttpAddress(String httpAddress) {
+ maybeInitBuilder();
+ if (httpAddress == null) {
+ builder.clearHttpAddress();
+ return;
+ }
+ builder.setHttpAddress(httpAddress);
+ }
+
+ @Override
+ public void setNumContainers(int numContainers) {
+ maybeInitBuilder();
+ if (numContainers == 0) {
+ builder.clearNumContainers();
+ return;
+ }
+ builder.setNumContainers(numContainers);
+ }
+
+ @Override
+ public void setRackName(String rackName) {
+ maybeInitBuilder();
+ if (rackName == null) {
+ builder.clearRackName();
+ return;
+ }
+ builder.setRackName(rackName);
+ }
+
+ @Override
+ public void setUsed(Resource used) {
+ maybeInitBuilder();
+ if (used == null)
+ builder.clearUsed();
+ this.used = used;
+ }
+
+ @Override
+ public NodeReportProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.nodeId != null
+ && !((NodeIdPBImpl) this.nodeId).getProto().equals(
+ builder.getNodeId())) {
+ builder.setNodeId(convertToProtoFormat(this.nodeId));
+ }
+ if (this.used != null
+ && !((ResourcePBImpl) this.used).getProto().equals(
+ builder.getUsed())) {
+ builder.setUsed(convertToProtoFormat(this.used));
+ }
+ if (this.capability != null
+ && !((ResourcePBImpl) this.capability).getProto().equals(
+ builder.getCapability())) {
+ builder.setCapability(convertToProtoFormat(this.capability));
+ }
+ if (this.nodeHealthStatus != null
+ && !((NodeHealthStatusPBImpl) this.nodeHealthStatus).getProto().equals(
+ builder.getNodeHealthStatus())) {
+ builder.setNodeHealthStatus(convertToProtoFormat(this.nodeHealthStatus));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = NodeReportProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
+ return new NodeIdPBImpl(p);
+ }
+
+ private NodeIdProto convertToProtoFormat(NodeId nodeId) {
+ return ((NodeIdPBImpl) nodeId).getProto();
+ }
+
+ private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
+ return new ResourcePBImpl(p);
+ }
+
+ private ResourceProto convertToProtoFormat(Resource r) {
+ return ((ResourcePBImpl) r).getProto();
+ }
+
+ private NodeHealthStatusPBImpl convertFromProtoFormat(NodeHealthStatusProto p) {
+ return new NodeHealthStatusPBImpl(p);
+ }
+
+ private NodeHealthStatusProto convertToProtoFormat(NodeHealthStatus r) {
+ return ((NodeHealthStatusPBImpl) r).getProto();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PriorityPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PriorityPBImpl.java
new file mode 100644
index 0000000..f715b06
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PriorityPBImpl.java
@@ -0,0 +1,58 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProtoOrBuilder;
+
+
+
+public class PriorityPBImpl extends ProtoBase<PriorityProto> implements Priority {
+ PriorityProto proto = PriorityProto.getDefaultInstance();
+ PriorityProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public PriorityPBImpl() {
+ builder = PriorityProto.newBuilder();
+ }
+
+ public PriorityPBImpl(PriorityProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public PriorityProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = PriorityProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public int getPriority() {
+ PriorityProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getPriority());
+ }
+
+ @Override
+ public void setPriority(int priority) {
+ maybeInitBuilder();
+ builder.setPriority((priority));
+ }
+
+
+ @Override
+ public int compareTo(Priority other) {
+ return this.getPriority() - other.getPriority();
+ }
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
new file mode 100644
index 0000000..9c89f0e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
@@ -0,0 +1,288 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto;
+import org.apache.hadoop.yarn.util.ProtoUtils;
+
+public class QueueInfoPBImpl extends ProtoBase<QueueInfoProto> implements
+ QueueInfo {
+
+ QueueInfoProto proto = QueueInfoProto.getDefaultInstance();
+ QueueInfoProto.Builder builder = null;
+ boolean viaProto = false;
+
+ List<ApplicationReport> applicationsList;
+ List<QueueInfo> childQueuesList;
+
+ public QueueInfoPBImpl() {
+ builder = QueueInfoProto.newBuilder();
+ }
+
+ public QueueInfoPBImpl(QueueInfoProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public List<ApplicationReport> getApplications() {
+ initLocalApplicationsList();
+ return this.applicationsList;
+ }
+
+ @Override
+ public float getCapacity() {
+ QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasCapacity()) ? p.getCapacity() : -1;
+ }
+
+ @Override
+ public List<QueueInfo> getChildQueues() {
+ initLocalChildQueuesList();
+ return this.childQueuesList;
+ }
+
+ @Override
+ public float getCurrentCapacity() {
+ QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasCurrentCapacity()) ? p.getCurrentCapacity() : 0;
+ }
+
+ @Override
+ public float getMaximumCapacity() {
+ QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasMaximumCapacity()) ? p.getMaximumCapacity() : -1;
+ }
+
+ @Override
+ public String getQueueName() {
+ QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasQueueName()) ? p.getQueueName() : null;
+ }
+
+ @Override
+ public QueueState getQueueState() {
+ QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasState()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getState());
+ }
+
+ @Override
+  public void setApplications(List<ApplicationReport> applications) {
+    maybeInitBuilder();
+    if (applications == null) {
+      builder.clearApplications();
+    }
+    this.applicationsList = applications;
+  }
+
+ @Override
+ public void setCapacity(float capacity) {
+ maybeInitBuilder();
+ builder.setCapacity(capacity);
+ }
+
+ @Override
+  public void setChildQueues(List<QueueInfo> childQueues) {
+    maybeInitBuilder();
+    if (childQueues == null) {
+      builder.clearChildQueues();
+    }
+    this.childQueuesList = childQueues;
+  }
+
+ @Override
+ public void setCurrentCapacity(float currentCapacity) {
+ maybeInitBuilder();
+ builder.setCurrentCapacity(currentCapacity);
+ }
+
+ @Override
+ public void setMaximumCapacity(float maximumCapacity) {
+ maybeInitBuilder();
+ builder.setMaximumCapacity(maximumCapacity);
+ }
+
+ @Override
+ public void setQueueName(String queueName) {
+ maybeInitBuilder();
+ if (queueName == null) {
+ builder.clearQueueName();
+ return;
+ }
+ builder.setQueueName(queueName);
+ }
+
+ @Override
+ public void setQueueState(QueueState queueState) {
+ maybeInitBuilder();
+ if (queueState == null) {
+ builder.clearState();
+ return;
+ }
+ builder.setState(convertToProtoFormat(queueState));
+ }
+
+ @Override
+ public QueueInfoProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void initLocalApplicationsList() {
+ if (this.applicationsList != null) {
+ return;
+ }
+ QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
+ List<ApplicationReportProto> list = p.getApplicationsList();
+ applicationsList = new ArrayList<ApplicationReport>();
+
+ for (ApplicationReportProto a : list) {
+ applicationsList.add(convertFromProtoFormat(a));
+ }
+ }
+
+ private void addApplicationsToProto() {
+ maybeInitBuilder();
+ builder.clearApplications();
+ if (applicationsList == null)
+ return;
+ Iterable<ApplicationReportProto> iterable = new Iterable<ApplicationReportProto>() {
+ @Override
+ public Iterator<ApplicationReportProto> iterator() {
+ return new Iterator<ApplicationReportProto>() {
+
+ Iterator<ApplicationReport> iter = applicationsList.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public ApplicationReportProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllApplications(iterable);
+ }
+
+ private void initLocalChildQueuesList() {
+ if (this.childQueuesList != null) {
+ return;
+ }
+ QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
+ List<QueueInfoProto> list = p.getChildQueuesList();
+ childQueuesList = new ArrayList<QueueInfo>();
+
+ for (QueueInfoProto a : list) {
+ childQueuesList.add(convertFromProtoFormat(a));
+ }
+ }
+
+ private void addChildQueuesInfoToProto() {
+ maybeInitBuilder();
+ builder.clearChildQueues();
+ if (childQueuesList == null)
+ return;
+ Iterable<QueueInfoProto> iterable = new Iterable<QueueInfoProto>() {
+ @Override
+ public Iterator<QueueInfoProto> iterator() {
+ return new Iterator<QueueInfoProto>() {
+
+ Iterator<QueueInfo> iter = childQueuesList.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public QueueInfoProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllChildQueues(iterable);
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.childQueuesList != null) {
+ addChildQueuesInfoToProto();
+ }
+ if (this.applicationsList != null) {
+ addApplicationsToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = QueueInfoProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ private ApplicationReportPBImpl convertFromProtoFormat(ApplicationReportProto a) {
+ return new ApplicationReportPBImpl(a);
+ }
+
+ private ApplicationReportProto convertToProtoFormat(ApplicationReport t) {
+ return ((ApplicationReportPBImpl)t).getProto();
+ }
+
+ private QueueInfoPBImpl convertFromProtoFormat(QueueInfoProto a) {
+ return new QueueInfoPBImpl(a);
+ }
+
+ private QueueInfoProto convertToProtoFormat(QueueInfo q) {
+ return ((QueueInfoPBImpl)q).getProto();
+ }
+
+ private QueueState convertFromProtoFormat(QueueStateProto q) {
+ return ProtoUtils.convertFromProtoFormat(q);
+ }
+
+ private QueueStateProto convertToProtoFormat(QueueState queueState) {
+ return ProtoUtils.convertToProtoFormat(queueState);
+ }
+
+}
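
The list-valued fields above are handled lazily in both directions: initLocalApplicationsList()/initLocalChildQueuesList() convert the proto lists into cached Java lists on first read, and addApplicationsToProto()/addChildQueuesInfoToProto() hand the builder an Iterable that converts each element only as addAll() drains it, so no second fully-converted list is ever allocated. A self-contained sketch of that converting-iterator shape (using java.util.function.Function for brevity; the patch inlines the same idea as anonymous classes):

    import java.util.Iterator;
    import java.util.List;
    import java.util.function.Function;

    final class ConvertingIterable<F, T> implements Iterable<T> {
      private final List<F> source;
      private final Function<F, T> convert;

      ConvertingIterable(List<F> source, Function<F, T> convert) {
        this.source = source;
        this.convert = convert;
      }

      @Override
      public Iterator<T> iterator() {
        final Iterator<F> iter = source.iterator();
        return new Iterator<T>() {
          @Override public boolean hasNext() { return iter.hasNext(); }
          @Override public T next() { return convert.apply(iter.next()); } // convert lazily
          @Override public void remove() { throw new UnsupportedOperationException(); }
        };
      }
    }

With such a helper the two addAll calls would collapse to something like builder.addAllApplications(new ConvertingIterable<ApplicationReport, ApplicationReportProto>(applicationsList, this::convertToProtoFormat)) — purely illustrative; the anonymous-class version in the patch behaves identically.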
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java
new file mode 100644
index 0000000..cd56927
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java
@@ -0,0 +1,148 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProtoOrBuilder;
+import org.apache.hadoop.yarn.util.ProtoUtils;
+
+public class QueueUserACLInfoPBImpl extends ProtoBase<QueueUserACLInfoProto>
+implements QueueUserACLInfo {
+
+ QueueUserACLInfoProto proto = QueueUserACLInfoProto.getDefaultInstance();
+ QueueUserACLInfoProto.Builder builder = null;
+ boolean viaProto = false;
+
+ List<QueueACL> userAclsList;
+
+ public QueueUserACLInfoPBImpl() {
+ builder = QueueUserACLInfoProto.newBuilder();
+ }
+
+ public QueueUserACLInfoPBImpl(QueueUserACLInfoProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public String getQueueName() {
+ QueueUserACLInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.hasQueueName()) ? p.getQueueName() : null;
+ }
+
+ @Override
+ public List<QueueACL> getUserAcls() {
+ initLocalQueueUserAclsList();
+ return this.userAclsList;
+ }
+
+ @Override
+ public void setQueueName(String queueName) {
+ maybeInitBuilder();
+ if (queueName == null) {
+ builder.clearQueueName();
+ return;
+ }
+ builder.setQueueName(queueName);
+ }
+
+ @Override
+  public void setUserAcls(List<QueueACL> userAclsList) {
+    maybeInitBuilder();
+    if (userAclsList == null) {
+      builder.clearUserAcls();
+    }
+    this.userAclsList = userAclsList;
+  }
+
+ @Override
+ public QueueUserACLInfoProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void initLocalQueueUserAclsList() {
+ if (this.userAclsList != null) {
+ return;
+ }
+ QueueUserACLInfoProtoOrBuilder p = viaProto ? proto : builder;
+ List<QueueACLProto> list = p.getUserAclsList();
+ userAclsList = new ArrayList<QueueACL>();
+
+ for (QueueACLProto a : list) {
+ userAclsList.add(convertFromProtoFormat(a));
+ }
+ }
+
+ private void addQueueACLsToProto() {
+ maybeInitBuilder();
+ builder.clearUserAcls();
+ if (userAclsList == null)
+ return;
+ Iterable<QueueACLProto> iterable = new Iterable<QueueACLProto>() {
+ @Override
+ public Iterator<QueueACLProto> iterator() {
+ return new Iterator<QueueACLProto>() {
+
+ Iterator<QueueACL> iter = userAclsList.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public QueueACLProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllUserAcls(iterable);
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = QueueUserACLInfoProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.userAclsList != null) {
+ addQueueACLsToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private QueueACL convertFromProtoFormat(QueueACLProto q) {
+ return ProtoUtils.convertFromProtoFormat(q);
+ }
+
+ private QueueACLProto convertToProtoFormat(QueueACL queueAcl) {
+ return ProtoUtils.convertToProtoFormat(queueAcl);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
new file mode 100644
index 0000000..5bc131e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -0,0 +1,57 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder;
+
+
+
+public class ResourcePBImpl extends ProtoBase<ResourceProto> implements Resource {
+ ResourceProto proto = ResourceProto.getDefaultInstance();
+ ResourceProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public ResourcePBImpl() {
+ builder = ResourceProto.newBuilder();
+ }
+
+ public ResourcePBImpl(ResourceProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ResourceProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ResourceProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public int getMemory() {
+ ResourceProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getMemory());
+ }
+
+ @Override
+ public void setMemory(int memory) {
+ maybeInitBuilder();
+ builder.setMemory((memory));
+ }
+
+ @Override
+ public int compareTo(Resource other) {
+ return this.getMemory() - other.getMemory();
+ }
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
new file mode 100644
index 0000000..8780c93
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
@@ -0,0 +1,174 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProtoOrBuilder;
+
+
+
+public class ResourceRequestPBImpl extends ProtoBase<ResourceRequestProto> implements ResourceRequest {
+ ResourceRequestProto proto = ResourceRequestProto.getDefaultInstance();
+ ResourceRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private Priority priority = null;
+ private Resource capability = null;
+
+
+ public ResourceRequestPBImpl() {
+ builder = ResourceRequestProto.newBuilder();
+ }
+
+ public ResourceRequestPBImpl(ResourceRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ResourceRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.priority != null) {
+ builder.setPriority(convertToProtoFormat(this.priority));
+ }
+ if (this.capability != null) {
+ builder.setCapability(convertToProtoFormat(this.capability));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ResourceRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public Priority getPriority() {
+ ResourceRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.priority != null) {
+ return this.priority;
+ }
+ if (!p.hasPriority()) {
+ return null;
+ }
+ this.priority = convertFromProtoFormat(p.getPriority());
+ return this.priority;
+ }
+
+ @Override
+ public void setPriority(Priority priority) {
+ maybeInitBuilder();
+ if (priority == null)
+ builder.clearPriority();
+ this.priority = priority;
+ }
+ @Override
+ public String getHostName() {
+ ResourceRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasHostName()) {
+ return null;
+ }
+ return (p.getHostName());
+ }
+
+ @Override
+ public void setHostName(String hostName) {
+ maybeInitBuilder();
+ if (hostName == null) {
+ builder.clearHostName();
+ return;
+ }
+ builder.setHostName((hostName));
+ }
+ @Override
+ public Resource getCapability() {
+ ResourceRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.capability != null) {
+ return this.capability;
+ }
+ if (!p.hasCapability()) {
+ return null;
+ }
+ this.capability = convertFromProtoFormat(p.getCapability());
+ return this.capability;
+ }
+
+ @Override
+ public void setCapability(Resource capability) {
+ maybeInitBuilder();
+ if (capability == null)
+ builder.clearCapability();
+ this.capability = capability;
+ }
+ @Override
+ public int getNumContainers() {
+ ResourceRequestProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getNumContainers());
+ }
+
+ @Override
+ public void setNumContainers(int numContainers) {
+ maybeInitBuilder();
+ builder.setNumContainers((numContainers));
+ }
+
+ private PriorityPBImpl convertFromProtoFormat(PriorityProto p) {
+ return new PriorityPBImpl(p);
+ }
+
+ private PriorityProto convertToProtoFormat(Priority t) {
+ return ((PriorityPBImpl)t).getProto();
+ }
+
+ private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
+ return new ResourcePBImpl(p);
+ }
+
+ private ResourceProto convertToProtoFormat(Resource t) {
+ return ((ResourcePBImpl)t).getProto();
+ }
+
+ @Override
+ public int compareTo(ResourceRequest other) {
+ if (this.getPriority().compareTo(other.getPriority()) == 0) {
+ if (this.getHostName().equals(other.getHostName())) {
+ if (this.getCapability().equals(other.getCapability())) {
+ if (this.getNumContainers() == other.getNumContainers()) {
+ return 0;
+ } else {
+ return this.getNumContainers() - other.getNumContainers();
+ }
+ } else {
+ return this.getCapability().compareTo(other.getCapability());
+ }
+ } else {
+ return this.getHostName().compareTo(other.getHostName());
+ }
+ } else {
+ return this.getPriority().compareTo(other.getPriority());
+ }
+ }
+
+}
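
The nested compareTo above is a lexicographic ordering over (priority, hostName, capability, numContainers): a request only falls through to the next key when the earlier keys tie. The same ordering written flat over plain fields, for clarity — a sketch, not a drop-in replacement; note that the subtraction-based comparisons in Priority and Resource can overflow for extreme values, whereas Integer.compare cannot:

    final class RequestOrder {
      static int compare(int prioA, String hostA, int memA, int numA,
                         int prioB, String hostB, int memB, int numB) {
        int c = Integer.compare(prioA, prioB);   // Priority.compareTo is a subtraction
        if (c != 0) return c;
        c = hostA.compareTo(hostB);
        if (c != 0) return c;
        c = Integer.compare(memA, memB);         // Resource.compareTo compares memory
        if (c != 0) return c;
        return Integer.compare(numA, numB);
      }
    }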
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java
new file mode 100644
index 0000000..cf18b64
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java
@@ -0,0 +1,107 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.proto.YarnProtos.URLProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.URLProtoOrBuilder;
+
+
+
+public class URLPBImpl extends ProtoBase<URLProto> implements URL {
+ URLProto proto = URLProto.getDefaultInstance();
+ URLProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public URLPBImpl() {
+ builder = URLProto.newBuilder();
+ }
+
+ public URLPBImpl(URLProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public URLProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = URLProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public String getFile() {
+ URLProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasFile()) {
+ return null;
+ }
+ return (p.getFile());
+ }
+
+ @Override
+ public void setFile(String file) {
+ maybeInitBuilder();
+ if (file == null) {
+ builder.clearFile();
+ return;
+ }
+ builder.setFile((file));
+ }
+ @Override
+ public String getScheme() {
+ URLProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasScheme()) {
+ return null;
+ }
+ return (p.getScheme());
+ }
+
+ @Override
+ public void setScheme(String scheme) {
+ maybeInitBuilder();
+ if (scheme == null) {
+ builder.clearScheme();
+ return;
+ }
+ builder.setScheme((scheme));
+ }
+ @Override
+ public String getHost() {
+ URLProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasHost()) {
+ return null;
+ }
+ return (p.getHost());
+ }
+
+ @Override
+ public void setHost(String host) {
+ maybeInitBuilder();
+ if (host == null) {
+ builder.clearHost();
+ return;
+ }
+ builder.setHost((host));
+ }
+ @Override
+ public int getPort() {
+ URLProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getPort());
+ }
+
+ @Override
+ public void setPort(int port) {
+ maybeInitBuilder();
+ builder.setPort((port));
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java
new file mode 100644
index 0000000..eed51cd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java
@@ -0,0 +1,53 @@
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
+import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProtoOrBuilder;
+
+
+
+public class YarnClusterMetricsPBImpl extends ProtoBase<YarnClusterMetricsProto> implements YarnClusterMetrics {
+ YarnClusterMetricsProto proto = YarnClusterMetricsProto.getDefaultInstance();
+ YarnClusterMetricsProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public YarnClusterMetricsPBImpl() {
+ builder = YarnClusterMetricsProto.newBuilder();
+ }
+
+ public YarnClusterMetricsPBImpl(YarnClusterMetricsProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public YarnClusterMetricsProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = YarnClusterMetricsProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public int getNumNodeManagers() {
+ YarnClusterMetricsProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getNumNodeManagers());
+ }
+
+ @Override
+ public void setNumNodeManagers(int numNodeManagers) {
+ maybeInitBuilder();
+ builder.setNumNodeManagers((numNodeManagers));
+ }
+
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRemoteException.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRemoteException.java
new file mode 100644
index 0000000..5c4210c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRemoteException.java
@@ -0,0 +1,43 @@
+package org.apache.hadoop.yarn.exceptions;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+
+public abstract class YarnRemoteException extends IOException {
+ private static final long serialVersionUID = 1L;
+
+ public YarnRemoteException() {
+ super();
+ }
+
+ public YarnRemoteException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public YarnRemoteException(Throwable cause) {
+ super(cause);
+ }
+
+ public YarnRemoteException(String message) {
+ super(message);
+ }
+
+ @Override
+ public void printStackTrace(PrintWriter pw) {
+ pw.append("RemoteTrace: \n").append(getRemoteTrace())
+ .append(" at LocalTrace: \n\t");
+ super.printStackTrace(pw);
+ }
+
+ @Override
+ public void printStackTrace(PrintStream ps) {
+ ps.append("RemoteTrace: \n").append(getRemoteTrace())
+ .append(" at Local Trace: \n\t");
+ super.printStackTrace(ps);
+ }
+
+ public abstract String getRemoteTrace();
+
+ public abstract YarnRemoteException getCause();
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java
new file mode 100644
index 0000000..be437c4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java
@@ -0,0 +1,89 @@
+package org.apache.hadoop.yarn.exceptions.impl.pb;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.proto.YarnProtos.YarnRemoteExceptionProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.YarnRemoteExceptionProtoOrBuilder;
+
+public class YarnRemoteExceptionPBImpl extends YarnRemoteException {
+
+ private static final long serialVersionUID = 1L;
+
+ YarnRemoteExceptionProto proto = YarnRemoteExceptionProto.getDefaultInstance();
+ YarnRemoteExceptionProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public YarnRemoteExceptionPBImpl() {
+ }
+
+ public YarnRemoteExceptionPBImpl(YarnRemoteExceptionProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public YarnRemoteExceptionPBImpl(String message) {
+ super(message);
+ maybeInitBuilder();
+ builder.setMessage(super.getMessage());
+ }
+
+  public YarnRemoteExceptionPBImpl(Throwable t) {
+    super(t);
+    maybeInitBuilder();
+
+    if (t.getCause() != null) {
+      builder.setCause(new YarnRemoteExceptionPBImpl(t.getCause()).getProto());
+      builder.setClassName(t.getClass().getName());
+    }
+    StringWriter sw = new StringWriter();
+    PrintWriter pw = new PrintWriter(sw);
+    t.printStackTrace(pw);
+    pw.close();
+    // StringWriter.toString() never returns null, so record the trace directly.
+    builder.setTrace(sw.toString());
+    if (t.getMessage() != null)
+      builder.setMessage(t.getMessage());
+  }
+
+ public YarnRemoteExceptionPBImpl(String message, Throwable t) {
+ this(t);
+ if (message != null)
+ builder.setMessage(message);
+ }
+ @Override
+ public String getMessage() {
+ YarnRemoteExceptionProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getMessage();
+ }
+
+ @Override
+ public String getRemoteTrace() {
+ YarnRemoteExceptionProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getTrace();
+ }
+
+ @Override
+ public YarnRemoteException getCause() {
+ YarnRemoteExceptionProtoOrBuilder p = viaProto ? proto : builder;
+ if (p.hasCause()) {
+ return new YarnRemoteExceptionPBImpl(p.getCause());
+ } else {
+ return null;
+ }
+ }
+
+ public YarnRemoteExceptionProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = YarnRemoteExceptionProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+}
\ No newline at end of file
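
The Throwable constructor above serializes the whole cause chain recursively: each cause becomes another YarnRemoteExceptionProto nested in the cause field, and getCause() re-inflates one level at a time on the receiving side, so the chain survives the RPC boundary as plain data. A self-contained sketch of that encoding, with a hypothetical RemoteError record in place of the generated proto:

    final class RemoteError {
      final String message;
      final String className;
      final RemoteError cause;                 // nested, like the proto 'cause' field

      RemoteError(String message, String className, RemoteError cause) {
        this.message = message;
        this.className = className;
        this.cause = cause;
      }

      // Recursively flatten a Throwable chain into nested RemoteError records.
      static RemoteError of(Throwable t) {
        RemoteError c = (t.getCause() == null) ? null : of(t.getCause());
        return new RemoteError(t.getMessage(), t.getClass().getName(), c);
      }
    }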
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java
new file mode 100644
index 0000000..7a73f53
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java
@@ -0,0 +1,106 @@
+package org.apache.hadoop.yarn.util;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationStateProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceTypeProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceVisibilityProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto;
+
+import com.google.protobuf.ByteString;
+
+public class ProtoUtils {
+
+
+ /*
+ * ContainerState
+ */
+  private static final String CONTAINER_STATE_PREFIX = "C_";
+ public static ContainerStateProto convertToProtoFormat(ContainerState e) {
+ return ContainerStateProto.valueOf(CONTAINER_STATE_PREFIX + e.name());
+ }
+ public static ContainerState convertFromProtoFormat(ContainerStateProto e) {
+ return ContainerState.valueOf(e.name().replace(CONTAINER_STATE_PREFIX, ""));
+ }
+
+
+ /*
+ * ApplicationState
+ */
+ public static ApplicationStateProto convertToProtoFormat(ApplicationState e) {
+ return ApplicationStateProto.valueOf(e.name());
+ }
+ public static ApplicationState convertFromProtoFormat(ApplicationStateProto e) {
+ return ApplicationState.valueOf(e.name());
+ }
+
+ /*
+ * LocalResourceType
+ */
+ public static LocalResourceTypeProto convertToProtoFormat(LocalResourceType e) {
+ return LocalResourceTypeProto.valueOf(e.name());
+ }
+ public static LocalResourceType convertFromProtoFormat(LocalResourceTypeProto e) {
+ return LocalResourceType.valueOf(e.name());
+ }
+
+ /*
+ * LocalResourceVisibility
+ */
+ public static LocalResourceVisibilityProto convertToProtoFormat(LocalResourceVisibility e) {
+ return LocalResourceVisibilityProto.valueOf(e.name());
+ }
+ public static LocalResourceVisibility convertFromProtoFormat(LocalResourceVisibilityProto e) {
+ return LocalResourceVisibility.valueOf(e.name());
+ }
+
+ /*
+ * ByteBuffer
+ */
+ public static ByteBuffer convertFromProtoFormat(ByteString byteString) {
+ int capacity = byteString.asReadOnlyByteBuffer().rewind().remaining();
+ byte[] b = new byte[capacity];
+ byteString.asReadOnlyByteBuffer().get(b, 0, capacity);
+ return ByteBuffer.wrap(b);
+ }
+
+ public static ByteString convertToProtoFormat(ByteBuffer byteBuffer) {
+// return ByteString.copyFrom((ByteBuffer)byteBuffer.duplicate().rewind());
+ int oldPos = byteBuffer.position();
+ byteBuffer.rewind();
+ ByteString bs = ByteString.copyFrom(byteBuffer);
+ byteBuffer.position(oldPos);
+ return bs;
+ }
+
+ /*
+ * QueueState
+ */
+  private static final String QUEUE_STATE_PREFIX = "Q_";
+ public static QueueStateProto convertToProtoFormat(QueueState e) {
+ return QueueStateProto.valueOf(QUEUE_STATE_PREFIX + e.name());
+ }
+ public static QueueState convertFromProtoFormat(QueueStateProto e) {
+ return QueueState.valueOf(e.name().replace(QUEUE_STATE_PREFIX, ""));
+ }
+
+ /*
+ * QueueACL
+ */
+  private static final String QUEUE_ACL_PREFIX = "Q_";
+ public static QueueACLProto convertToProtoFormat(QueueACL e) {
+ return QueueACLProto.valueOf(QUEUE_ACL_PREFIX + e.name());
+ }
+ public static QueueACL convertFromProtoFormat(QueueACLProto e) {
+ return QueueACL.valueOf(e.name().replace(QUEUE_ACL_PREFIX, ""));
+ }
+
+}
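
The prefixing scheme above exists because proto2 enum values share their enclosing scope, so two enums generated into the same YarnProtos file cannot both declare a bare RUNNING; ApplicationStateProto already claims the unprefixed names, hence the "C_"/"Q_" prefixes on the container and queue enums. A self-contained sketch of the round trip:

    enum ContainerState { NEW, RUNNING, COMPLETE }
    enum ContainerStateProto { C_NEW, C_RUNNING, C_COMPLETE }

    final class EnumConvert {
      private static final String PREFIX = "C_";

      static ContainerStateProto toProto(ContainerState e) {
        return ContainerStateProto.valueOf(PREFIX + e.name());
      }

      static ContainerState fromProto(ContainerStateProto e) {
        // substring is a touch stricter than the replace() used above, which
        // would also rewrite the prefix if it ever appeared mid-name.
        return ContainerState.valueOf(e.name().substring(PREFIX.length()));
      }
    }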
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/AM_RM_protocol.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/AM_RM_protocol.proto
new file mode 100644
index 0000000..98599ea
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/AM_RM_protocol.proto
@@ -0,0 +1,13 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "AMRMProtocol";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_service_protos.proto";
+
+
+service AMRMProtocolService {
+ rpc registerApplicationMaster (RegisterApplicationMasterRequestProto) returns (RegisterApplicationMasterResponseProto);
+ rpc finishApplicationMaster (FinishApplicationMasterRequestProto) returns (FinishApplicationMasterResponseProto);
+ rpc allocate (AllocateRequestProto) returns (AllocateResponseProto);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto
new file mode 100644
index 0000000..7091f94
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto
@@ -0,0 +1,19 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "ClientRMProtocol";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_service_protos.proto";
+
+service ClientRMProtocolService {
+ rpc getNewApplicationId (GetNewApplicationIdRequestProto) returns (GetNewApplicationIdResponseProto);
+ rpc getApplicationReport (GetApplicationReportRequestProto) returns (GetApplicationReportResponseProto);
+ rpc submitApplication (SubmitApplicationRequestProto) returns (SubmitApplicationResponseProto);
+ rpc finishApplication (FinishApplicationRequestProto) returns (FinishApplicationResponseProto);
+ rpc getClusterMetrics (GetClusterMetricsRequestProto) returns (GetClusterMetricsResponseProto);
+ rpc getAllApplications (GetAllApplicationsRequestProto) returns (GetAllApplicationsResponseProto);
+ rpc getClusterNodes (GetClusterNodesRequestProto) returns (GetClusterNodesResponseProto);
+ rpc getQueueInfo (GetQueueInfoRequestProto) returns (GetQueueInfoResponseProto);
+ rpc getQueueUserAcls (GetQueueUserAclsInfoRequestProto) returns (GetQueueUserAclsInfoResponseProto);
+}
+
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/container_manager.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/container_manager.proto
new file mode 100644
index 0000000..7a6aff9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/container_manager.proto
@@ -0,0 +1,12 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "ContainerManager";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_service_protos.proto";
+
+service ContainerManagerService {
+ rpc startContainer(StartContainerRequestProto) returns (StartContainerResponseProto);
+ rpc stopContainer(StopContainerRequestProto) returns (StopContainerResponseProto);
+ rpc getContainerStatus(GetContainerStatusRequestProto) returns (GetContainerStatusResponseProto);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
new file mode 100644
index 0000000..5eef40a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -0,0 +1,265 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "YarnProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+message YarnRemoteExceptionProto {
+ optional string message = 1;
+ optional string trace = 2;
+ optional string class_name = 3;
+ optional YarnRemoteExceptionProto cause = 4;
+}
+
+message ApplicationIdProto {
+ optional int32 id = 1;
+ optional int64 cluster_timestamp = 2;
+}
+
+message ApplicationAttemptIdProto {
+ optional ApplicationIdProto application_id = 1;
+ optional int32 attemptId = 2;
+}
+
+message ContainerIdProto {
+ optional ApplicationIdProto app_id = 1;
+ optional ApplicationAttemptIdProto app_attempt_id = 2;
+ optional int32 id = 3;
+}
+
+message ResourceProto {
+ optional int32 memory = 1;
+}
+
+enum ContainerStateProto {
+ C_NEW = 1;
+ C_RUNNING = 2;
+ C_COMPLETE = 3;
+}
+
+message ContainerTokenProto {
+ optional bytes identifier = 1;
+ optional bytes password = 2;
+ optional string kind = 3;
+ optional string service = 4;
+}
+
+message ContainerProto {
+ optional ContainerIdProto id = 1;
+ optional NodeIdProto nodeId = 2;
+ optional string node_http_address = 3;
+ optional ResourceProto resource = 4;
+ optional ContainerStateProto state = 5;
+ optional ContainerTokenProto container_token = 6;
+ optional ContainerStatusProto container_status = 7;
+}
+
+enum ApplicationStateProto {
+ NEW = 1;
+ SUBMITTED = 2;
+ RUNNING = 3;
+ RESTARTING = 4;
+ SUCCEEDED = 5;
+ FAILED = 6;
+ KILLED = 7;
+}
+
+message ApplicationStatusProto {
+ optional int32 response_id = 1;
+ optional ApplicationAttemptIdProto application_attempt_id = 2;
+ optional float progress = 3;
+}
+
+message ApplicationMasterProto {
+ optional ApplicationIdProto application_id = 1;
+ optional string host = 2;
+ optional int32 rpc_port = 3;
+ optional string trackingUrl = 4;
+ optional ApplicationStatusProto status = 5;
+ optional ApplicationStateProto state = 6;
+ optional string client_token = 7;
+ optional int32 containerCount = 8;
+ optional int32 amFailCount = 9;
+ optional string diagnostics = 10 [default = ""];
+}
+
+message URLProto {
+ optional string scheme = 1;
+ optional string host = 2;
+ optional int32 port = 3;
+ optional string file = 4;
+}
+
+enum LocalResourceVisibilityProto {
+ PUBLIC = 1;
+ PRIVATE = 2;
+ APPLICATION = 3;
+}
+
+enum LocalResourceTypeProto {
+ ARCHIVE = 1;
+ FILE = 2;
+}
+
+message LocalResourceProto {
+ optional URLProto resource = 1;
+ optional int64 size = 2;
+ optional int64 timestamp = 3;
+ optional LocalResourceTypeProto type = 4;
+  optional LocalResourceVisibilityProto visibility = 5;
+}
+
+message ApplicationReportProto {
+ optional ApplicationIdProto applicationId = 1;
+ optional string user = 2;
+ optional string queue = 3;
+ optional string name = 4;
+ optional string host = 5;
+ optional int32 rpc_port = 6;
+ optional string client_token = 7;
+ optional ApplicationStatusProto status = 8;
+ optional ApplicationStateProto state = 9;
+ optional ContainerProto masterContainer = 10;
+ optional string trackingUrl = 11;
+ optional string diagnostics = 12 [default = "N/A"];
+}
+
+message NodeIdProto {
+ optional string host = 1;
+ optional int32 port = 2;
+}
+
+message NodeHealthStatusProto {
+ optional bool is_node_healthy = 1;
+ optional string health_report = 2;
+ optional int64 last_health_report_time = 3;
+}
+
+message NodeReportProto {
+ optional NodeIdProto nodeId = 1;
+ optional string httpAddress = 2;
+ optional string rackName = 3;
+ optional ResourceProto used = 4;
+ optional ResourceProto capability = 5;
+ optional int32 numContainers = 6;
+ optional NodeHealthStatusProto node_health_status = 8;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+////// From AM_RM_Protocol /////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+message ResourceRequestProto {
+ optional PriorityProto priority = 1;
+ optional string host_name = 2;
+ optional ResourceProto capability = 3;
+ optional int32 num_containers = 4;
+}
+
+message AMResponseProto {
+ optional bool reboot = 1;
+ optional int32 response_id = 2;
+ repeated ContainerProto new_containers = 3;
+ repeated ContainerProto finished_containers = 4;
+ optional ResourceProto limit = 5;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+////// From client_RM_Protocol /////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+message ApplicationSubmissionContextProto {
+ optional ApplicationIdProto application_id = 1;
+ optional string application_name = 2;
+ optional ResourceProto master_capability = 3;
+ repeated StringURLMapProto resources = 4;
+ repeated StringLocalResourceMapProto resources_todo = 5;
+ repeated string fs_tokens = 6;
+ optional bytes fs_tokens_todo = 7;
+ repeated StringStringMapProto environment = 8;
+ repeated string command = 9;
+ optional string queue = 10;
+ optional PriorityProto priority = 11;
+ optional string user = 12;
+}
+
+message YarnClusterMetricsProto {
+ optional int32 num_node_managers = 1;
+}
+
+enum QueueStateProto {
+ Q_STOPPED = 1;
+ Q_RUNNING = 2;
+}
+
+message QueueInfoProto {
+ optional string queueName = 1;
+ optional float capacity = 2;
+ optional float maximumCapacity = 3;
+ optional float currentCapacity = 4;
+ optional QueueStateProto state = 5;
+ repeated QueueInfoProto childQueues = 6;
+ repeated ApplicationReportProto applications = 7;
+}
+
+enum QueueACLProto {
+ Q_SUBMIT_JOB = 1;
+ Q_ADMINISTER_QUEUE = 2;
+ Q_ADMINISTER_JOBS = 3;
+}
+
+message QueueUserACLInfoProto {
+ optional string queueName = 1;
+ repeated QueueACLProto userAcls = 2;
+}
+
+////////////////////////////////////////////////////////////////////////
+////// From container_manager //////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+
+message ContainerLaunchContextProto {
+ optional ContainerIdProto container_id = 1;
+ optional string user = 2;
+ optional ResourceProto resource = 3;
+ repeated StringLocalResourceMapProto localResources = 4;
+ optional bytes container_tokens = 5;
+ repeated StringBytesMapProto service_data = 6;
+ repeated StringStringMapProto env = 7;
+ repeated string command = 8;
+}
+
+message ContainerStatusProto {
+ optional ContainerIdProto container_id = 1;
+ optional ContainerStateProto state = 2;
+ optional string diagnostics = 3 [default = "N/A"];
+ optional string exit_status = 4 [default = "N/A"];
+}
+
+
+
+////////////////////////////////////////////////////////////////////////
+////// From common//////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+message PriorityProto {
+ optional int32 priority = 1;
+}
+
+message StringURLMapProto {
+ optional string key = 1;
+ optional URLProto value = 2;
+}
+
+message StringLocalResourceMapProto {
+ optional string key = 1;
+ optional LocalResourceProto value = 2;
+}
+
+message StringStringMapProto {
+ optional string key = 1;
+ optional string value = 2;
+}
+
+message StringBytesMapProto {
+ optional string key = 1;
+ optional bytes value = 2;
+}
+
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
new file mode 100644
index 0000000..e0f79dc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -0,0 +1,143 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "YarnServiceProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_protos.proto";
+
+//////////////////////////////////////////////////////
+/////// AM_RM_Protocol ///////////////////////////////
+//////////////////////////////////////////////////////
+message RegisterApplicationMasterRequestProto {
+ optional ApplicationAttemptIdProto application_attempt_id = 1;
+ optional string host = 2;
+ optional int32 rpc_port = 3;
+ optional string tracking_url = 4;
+}
+
+message RegisterApplicationMasterResponseProto {
+ optional ResourceProto minimumCapability = 1;
+ optional ResourceProto maximumCapability = 2;
+}
+
+message FinishApplicationMasterRequestProto {
+ optional ApplicationAttemptIdProto application_attempt_id = 1;
+ optional string diagnostics = 2;
+ optional string tracking_url = 3;
+ optional string final_state = 4;
+}
+
+message FinishApplicationMasterResponseProto {
+}
+
+message AllocateRequestProto {
+ optional ApplicationAttemptIdProto application_attempt_id = 1;
+ repeated ResourceRequestProto ask = 2;
+ repeated ContainerIdProto release = 3;
+ optional int32 response_id = 4;
+ optional float progress = 5;
+}
+
+message AllocateResponseProto {
+ optional AMResponseProto AM_response = 1;
+}
+
+
+
+//////////////////////////////////////////////////////
+/////// client_RM_Protocol ///////////////////////////
+//////////////////////////////////////////////////////
+
+message GetNewApplicationIdRequestProto {
+}
+
+message GetNewApplicationIdResponseProto {
+ optional ApplicationIdProto application_id = 1;
+}
+
+message GetApplicationReportRequestProto {
+ optional ApplicationIdProto application_id = 1;
+}
+
+message GetApplicationReportResponseProto {
+ optional ApplicationReportProto application_report = 1;
+}
+
+message SubmitApplicationRequestProto {
+  optional ApplicationSubmissionContextProto application_submission_context = 1;
+}
+
+message SubmitApplicationResponseProto {
+}
+
+message FinishApplicationRequestProto {
+ optional ApplicationIdProto application_id = 1;
+}
+
+message FinishApplicationResponseProto {
+}
+
+message GetClusterMetricsRequestProto {
+}
+
+message GetClusterMetricsResponseProto {
+ optional YarnClusterMetricsProto cluster_metrics = 1;
+}
+
+message GetAllApplicationsRequestProto {
+}
+
+message GetAllApplicationsResponseProto {
+ repeated ApplicationReportProto applications = 1;
+}
+
+message GetClusterNodesRequestProto {
+}
+
+message GetClusterNodesResponseProto {
+ repeated NodeReportProto nodeReports = 1;
+}
+
+message GetQueueInfoRequestProto {
+ optional string queueName = 1;
+ optional bool includeApplications = 2;
+ optional bool includeChildQueues = 3;
+ optional bool recursive = 4;
+}
+
+message GetQueueInfoResponseProto {
+ optional QueueInfoProto queueInfo = 1;
+}
+
+message GetQueueUserAclsInfoRequestProto {
+}
+
+message GetQueueUserAclsInfoResponseProto {
+ repeated QueueUserACLInfoProto queueUserAcls = 1;
+}
+
+//////////////////////////////////////////////////////
+/////// client_NM_Protocol ///////////////////////////
+//////////////////////////////////////////////////////
+
+message StartContainerRequestProto {
+ optional ContainerLaunchContextProto container_launch_context = 1;
+}
+
+message StartContainerResponseProto {
+}
+
+message StopContainerRequestProto {
+ optional ContainerIdProto container_id = 1;
+}
+
+message StopContainerResponseProto {
+}
+
+message GetContainerStatusRequestProto {
+ optional ContainerIdProto container_id = 1;
+}
+
+message GetContainerStatusResponseProto {
+ optional ContainerStatusProto status = 1;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/pom.xml
new file mode 100644
index 0000000..09dfb8d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -0,0 +1,131 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-yarn</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${yarn.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ <name>hadoop-yarn-common</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <yarn.basedir>${project.parent.basedir}</yarn.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <version>1.2.12</version>
+ <exclusions>
+ <exclusion>
+ <groupId>com.sun.jdmk</groupId>
+ <artifactId>jmxtools</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jmx</groupId>
+ <artifactId>jmxri</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-api</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-jar-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ <phase>test-compile</phase>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-protobuf-generated-sources-directory</id>
+ <phase>initialize</phase>
+ <configuration>
+ <target>
+ <mkdir dir="target/generated-sources/proto" />
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>generate-sources</id>
+ <phase>generate-sources</phase>
+ <configuration>
+ <executable>protoc</executable>
+ <arguments>
+ <argument>-I../hadoop-yarn-api/src/main/proto/</argument>
+ <argument>-Isrc/main/proto/</argument>
+ <argument>--java_out=target/generated-sources/proto</argument>
+ <argument>src/main/proto/yarnprototunnelrpc.proto</argument>
+ </arguments>
+ </configuration>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>generate-version</id>
+ <phase>generate-sources</phase>
+ <configuration>
+ <executable>scripts/saveVersion.sh</executable>
+ <arguments>
+ <argument>${yarn.version}</argument>
+ <argument>${project.build.directory}</argument>
+ </arguments>
+ </configuration>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>target/generated-sources/proto</source>
+ <source>target/generated-sources/version</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/scripts/saveVersion.sh b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/scripts/saveVersion.sh
new file mode 100755
index 0000000..11d7022
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/scripts/saveVersion.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is used to generate the package-info.java class that
+# records the version, revision, branch, user, timestamp, and url
+unset LANG
+unset LC_CTYPE
+unset LC_TIME
+version=$1
+build_dir=$2
+user=`whoami`
+date=`date`
+dir=`pwd`
+cwd=`dirname $dir`
+if git rev-parse HEAD 2>/dev/null > /dev/null ; then
+ revision=`git log -1 --pretty=format:"%H" ../`
+ hostname=`hostname`
+ branch=`git branch | sed -n -e 's/^* //p'`
+ url="git://${hostname}${cwd}"
+elif [ -d .svn ]; then
+ revision=`svn info ../ | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'`
+ url=`svn info ../ | sed -n -e 's/URL: \(.*\)/\1/p'`
+ # Get canonical branch (branches/X, tags/X, or trunk)
+ branch=`echo $url | sed -n -e 's,.*\(branches/.*\)$,\1,p' \
+ -e 's,.*\(tags/.*\)$,\1,p' \
+ -e 's,.*trunk$,trunk,p'`
+else
+ revision="Unknown"
+ branch="Unknown"
+ url="file://$cwd"
+fi
+srcChecksum=`find ../ -name '*.java' | grep -v generated-sources | LC_ALL=C sort | xargs md5sum | md5sum | cut -d ' ' -f 1`
+
+mkdir -p $build_dir/generated-sources/version/org/apache/hadoop/yarn/
+cat << EOF | \
+ sed -e "s/VERSION/$version/" -e "s/USER/$user/" -e "s/DATE/$date/" \
+ -e "s|URL|$url|" -e "s/REV/$revision/" \
+ -e "s|BRANCH|$branch|" -e "s/SRCCHECKSUM/$srcChecksum/" \
+ > $build_dir/generated-sources/version/org/apache/hadoop/yarn/package-info.java
+/*
+ * Generated by saveVersion.sh
+ */
+@YarnVersionAnnotation(version="VERSION", revision="REV", branch="BRANCH",
+ user="USER", date="DATE", url="URL",
+ srcChecksum="SRCCHECKSUM")
+package org.apache.hadoop.yarn;
+EOF
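
The heredoc above writes a package-info.java that stamps the build metadata onto the org.apache.hadoop.yarn package as an annotation. Assuming YarnVersionAnnotation is declared with runtime retention and elements named after the sed placeholders (version, revision, branch, and so on — both are assumptions here), the stamped values could be read back reflectively along these lines:

    // Load a class from the package first so its package-info is visible.
    Package p = org.apache.hadoop.yarn.Clock.class.getPackage();
    YarnVersionAnnotation v = p.getAnnotation(YarnVersionAnnotation.class);
    if (v != null) {
      System.out.println(v.version() + " (" + v.branch() + " @ " + v.revision() + ")");
    }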
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/Clock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/Clock.java
new file mode 100644
index 0000000..94f10a8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/Clock.java
@@ -0,0 +1,24 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn;
+
+public interface Clock {
+
+ long getTime();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java
new file mode 100644
index 0000000..ac4a61f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn;
+
+import java.io.File;
+import java.util.LinkedList;
+import java.util.Queue;
+
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.log4j.FileAppender;
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * A simple log4j-appender for the task child's
+ * map-reduce system logs.
+ *
+ */
+@InterfaceStability.Unstable
+public class ContainerLogAppender extends FileAppender {
+ private String containerLogDir;
+  // so that log4j can configure it from the configuration (log4j.properties).
+ private int maxEvents;
+ private Queue<LoggingEvent> tail = null;
+
+ @Override
+ public void activateOptions() {
+ synchronized (this) {
+ if (maxEvents > 0) {
+ tail = new LinkedList<LoggingEvent>();
+ }
+ setFile(new File(this.containerLogDir, "syslog").toString());
+ setAppend(true);
+ super.activateOptions();
+ }
+ }
+
+ @Override
+ public void append(LoggingEvent event) {
+ synchronized (this) {
+ if (tail == null) {
+ super.append(event);
+ } else {
+ if (tail.size() >= maxEvents) {
+ tail.remove();
+ }
+ tail.add(event);
+ }
+ }
+ }
+
+ public void flush() {
+   if (qw != null) qw.flush(); // qw is null until activateOptions() runs
+ }
+
+ @Override
+ public synchronized void close() {
+ if (tail != null) {
+ for(LoggingEvent event: tail) {
+ super.append(event);
+ }
+ }
+ super.close();
+ }
+
+ /**
+ * Getter/Setter methods for log4j.
+ */
+
+ public String getContainerLogDir() {
+ return this.containerLogDir;
+ }
+
+ public void setContainerLogDir(String containerLogDir) {
+ this.containerLogDir = containerLogDir;
+ }
+
+ private static final int EVENT_SIZE = 100;
+
+ public long getTotalLogFileSize() {
+ return maxEvents * EVENT_SIZE;
+ }
+
+ public void setTotalLogFileSize(long logSize) {
+ maxEvents = (int) (logSize / EVENT_SIZE);
+ }
+}
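
log4j instantiates this appender and drives the bean-style setters above from its configuration, so containerLogDir and totalLogFileSize arrive as strings from log4j.properties. A programmatic sketch of the same wiring; the directory, size, and layout pattern are illustrative:

import org.apache.hadoop.yarn.ContainerLogAppender;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class ContainerLogSetup {
  public static void main(String[] args) {
    ContainerLogAppender appender = new ContainerLogAppender();
    appender.setContainerLogDir("/tmp/container_01_000001"); // hypothetical log dir
    // 10000 / EVENT_SIZE (100) => retain only the last 100 events,
    // which are written out when the appender is closed.
    appender.setTotalLogFileSize(10000L);
    appender.setLayout(new PatternLayout("%d{ISO8601} %p [%t] %c: %m%n"));
    appender.activateOptions(); // opens <containerLogDir>/syslog
    Logger.getRootLogger().addAppender(appender);
  }
}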
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/Lock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/Lock.java
new file mode 100644
index 0000000..05a3c3a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/Lock.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn;
+
+import java.lang.annotation.Documented;
+
+/**
+ * Annotation to document locking order.
+ */
+@Documented public @interface Lock {
+ @SuppressWarnings("unchecked")
+ Class[] value();
+
+ public class NoLock {}
+}
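
The annotation is purely documentary: value() lists the locks (by class) a method acquires or expects to hold, and Lock.NoLock marks methods that need none. A usage sketch on a hypothetical scheduler class:

// Hypothetical class; only the @Lock annotations are the point here.
public class FifoScheduler {

  @Lock(FifoScheduler.class)   // documented: synchronizes on the scheduler itself
  synchronized void allocate() {
    // ... allocation logic ...
  }

  @Lock(Lock.NoLock.class)     // documented: safe without holding any lock
  int getPendingCount() {
    return 0; // placeholder
  }
}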
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/SystemClock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/SystemClock.java
new file mode 100644
index 0000000..ec72157
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/SystemClock.java
@@ -0,0 +1,25 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn;
+
+public class SystemClock implements Clock {
+
+ public long getTime() {
+ return System.currentTimeMillis();
+ }
+}
\ No newline at end of file
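
SystemClock is the production implementation; the intended pattern is for components to take a Clock in their constructor so a controllable clock can be substituted in tests. A sketch, where HeartbeatMonitor and its methods are illustrative:

// Hypothetical consumer that never touches System.currentTimeMillis() directly.
public class HeartbeatMonitor {
  private final Clock clock;
  private volatile long lastHeartbeat;

  public HeartbeatMonitor(Clock clock) {
    this.clock = clock;
    this.lastHeartbeat = clock.getTime();
  }

  public void heartbeat() {
    lastHeartbeat = clock.getTime();
  }

  public boolean isExpired(long timeoutMillis) {
    return clock.getTime() - lastHeartbeat > timeoutMillis;
  }
}

// Production wiring: new HeartbeatMonitor(new SystemClock());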
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnException.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnException.java
new file mode 100644
index 0000000..b12fc81e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnException.java
@@ -0,0 +1,29 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn;
+
+/** Base Yarn Exception.
+ */
+public class YarnException extends RuntimeException {
+ public YarnException(Throwable cause) { super(cause); }
+ public YarnException(String message) { super(message); }
+ public YarnException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
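
Since YarnException extends RuntimeException, it is the standard way in this code base to rethrow checked exceptions without declaring them; AsyncDispatcher below wraps InterruptedException exactly this way. A small sketch with a hypothetical helper:

// Hypothetical helper: convert a checked exception into an unchecked one.
static void sleepQuietly(long millis) {
  try {
    Thread.sleep(millis);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // preserve the interrupt status
    throw new YarnException(e);
  }
}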
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnVersionAnnotation.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnVersionAnnotation.java
new file mode 100644
index 0000000..ec54b1a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnVersionAnnotation.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn;
+
+import java.lang.annotation.*;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A package attribute that captures the version of Yarn that was compiled.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.PACKAGE)
+@InterfaceAudience.LimitedPrivate({"MapReduce", "yarn"})
+@InterfaceStability.Unstable
+public @interface YarnVersionAnnotation {
+
+ /**
+ * Get the Yarn version.
+ * @return the version string, e.g. "0.6.3-dev"
+ */
+ String version();
+
+ /**
+ * Get the username that compiled Yarn.
+ */
+ String user();
+
+ /**
+ * Get the date when Yarn was compiled.
+ * @return the date in unix 'date' format
+ */
+ String date();
+
+ /**
+ * Get the url for the subversion repository.
+ */
+ String url();
+
+ /**
+ * Get the subversion revision.
+ * @return the revision number as a string (e.g. "451451")
+ */
+ String revision();
+
+ /**
+ * Get the branch from which this was compiled.
+ * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
+ */
+ String branch();
+
+ /**
+ * Get a checksum of the source files from which
+ * Yarn was compiled.
+ * @return a string that uniquely identifies the source
+ **/
+ String srcChecksum();
+}
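
Because the annotation has RUNTIME retention and targets the package, the build metadata written into package-info.java (as in the generated file earlier in this patch) can be read back reflectively. A sketch; using YarnException as the lookup class is arbitrary, any class in org.apache.hadoop.yarn works:

// Read the version information stamped into the org.apache.hadoop.yarn package.
static String yarnVersionString() {
  Package pkg = YarnException.class.getPackage();
  YarnVersionAnnotation info = pkg.getAnnotation(YarnVersionAnnotation.class);
  if (info == null) {
    return "Unknown"; // package-info was not compiled with the annotation
  }
  return info.version() + " from " + info.revision()
      + " source checksum " + info.srcChecksum();
}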
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java
new file mode 100644
index 0000000..34e95f0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java
@@ -0,0 +1,94 @@
+package org.apache.hadoop.yarn.api.impl.pb.client;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
+import org.apache.hadoop.yarn.proto.AMRMProtocol.AMRMProtocolService;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
+
+import com.google.protobuf.ServiceException;
+
+public class AMRMProtocolPBClientImpl implements AMRMProtocol {
+
+ private AMRMProtocolService.BlockingInterface proxy;
+
+ public AMRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
+ RPC.setProtocolEngine(conf, AMRMProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
+ proxy = (AMRMProtocolService.BlockingInterface)RPC.getProxy(
+ AMRMProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ }
+
+
+ @Override
+ public AllocateResponse allocate(AllocateRequest request)
+ throws YarnRemoteException {
+ AllocateRequestProto requestProto = ((AllocateRequestPBImpl)request).getProto();
+ try {
+ return new AllocateResponsePBImpl(proxy.allocate(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+
+
+ @Override
+ public FinishApplicationMasterResponse finishApplicationMaster(
+ FinishApplicationMasterRequest request) throws YarnRemoteException {
+ FinishApplicationMasterRequestProto requestProto = ((FinishApplicationMasterRequestPBImpl)request).getProto();
+ try {
+ return new FinishApplicationMasterResponsePBImpl(proxy.finishApplicationMaster(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public RegisterApplicationMasterResponse registerApplicationMaster(
+ RegisterApplicationMasterRequest request) throws YarnRemoteException {
+ RegisterApplicationMasterRequestProto requestProto = ((RegisterApplicationMasterRequestPBImpl)request).getProto();
+ try {
+ return new RegisterApplicationMasterResponsePBImpl(proxy.registerApplicationMaster(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+}
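
Every method here follows one shape: unwrap the record into its protobuf, call the generated blocking stub, rewrap the response, and translate ServiceException causes back into YarnRemoteException. A usage sketch for registering an application master; the address, the version argument, and building the request via its PBImpl are assumptions for illustration:

Configuration conf = new Configuration();
InetSocketAddress rmAddr =
    new InetSocketAddress("rm.example.com", 8030); // hypothetical scheduler address
AMRMProtocol scheduler =
    new AMRMProtocolPBClientImpl(1L /* assumed version */, rmAddr, conf);

RegisterApplicationMasterRequest request =
    new RegisterApplicationMasterRequestPBImpl(); // then set attempt id, host, port...
RegisterApplicationMasterResponse response =
    scheduler.registerApplicationMaster(request);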
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
new file mode 100644
index 0000000..d95f81d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
@@ -0,0 +1,233 @@
+package org.apache.hadoop.yarn.api.impl.pb.client;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllApplicationsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllApplicationsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationIdRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationIdResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
+import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto;
+
+import com.google.protobuf.ServiceException;
+
+public class ClientRMProtocolPBClientImpl implements ClientRMProtocol {
+
+ private ClientRMProtocolService.BlockingInterface proxy;
+
+ public ClientRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
+ RPC.setProtocolEngine(conf, ClientRMProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
+ proxy = (ClientRMProtocolService.BlockingInterface)RPC.getProxy(
+ ClientRMProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ }
+
+ @Override
+ public FinishApplicationResponse finishApplication(
+ FinishApplicationRequest request) throws YarnRemoteException {
+ FinishApplicationRequestProto requestProto = ((FinishApplicationRequestPBImpl)request).getProto();
+ try {
+ return new FinishApplicationResponsePBImpl(proxy.finishApplication(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetApplicationReportResponse getApplicationReport(
+ GetApplicationReportRequest request) throws YarnRemoteException {
+ GetApplicationReportRequestProto requestProto = ((GetApplicationReportRequestPBImpl)request).getProto();
+ try {
+ return new GetApplicationReportResponsePBImpl(proxy.getApplicationReport(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetClusterMetricsResponse getClusterMetrics(
+ GetClusterMetricsRequest request) throws YarnRemoteException {
+ GetClusterMetricsRequestProto requestProto = ((GetClusterMetricsRequestPBImpl)request).getProto();
+ try {
+ return new GetClusterMetricsResponsePBImpl(proxy.getClusterMetrics(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetNewApplicationIdResponse getNewApplicationId(
+ GetNewApplicationIdRequest request) throws YarnRemoteException {
+ GetNewApplicationIdRequestProto requestProto = ((GetNewApplicationIdRequestPBImpl)request).getProto();
+ try {
+ return new GetNewApplicationIdResponsePBImpl(proxy.getNewApplicationId(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public SubmitApplicationResponse submitApplication(
+ SubmitApplicationRequest request) throws YarnRemoteException {
+ SubmitApplicationRequestProto requestProto = ((SubmitApplicationRequestPBImpl)request).getProto();
+ try {
+ return new SubmitApplicationResponsePBImpl(proxy.submitApplication(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetAllApplicationsResponse getAllApplications(
+ GetAllApplicationsRequest request) throws YarnRemoteException {
+ GetAllApplicationsRequestProto requestProto =
+ ((GetAllApplicationsRequestPBImpl)request).getProto();
+ try {
+ return new GetAllApplicationsResponsePBImpl(
+ proxy.getAllApplications(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetClusterNodesResponse getClusterNodes(
+ GetClusterNodesRequest request) throws YarnRemoteException {
+ GetClusterNodesRequestProto requestProto =
+ ((GetClusterNodesRequestPBImpl)request).getProto();
+ try {
+ return new GetClusterNodesResponsePBImpl(
+ proxy.getClusterNodes(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request)
+ throws YarnRemoteException {
+ GetQueueInfoRequestProto requestProto =
+ ((GetQueueInfoRequestPBImpl)request).getProto();
+ try {
+ return new GetQueueInfoResponsePBImpl(
+ proxy.getQueueInfo(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public GetQueueUserAclsInfoResponse getQueueUserAcls(
+ GetQueueUserAclsInfoRequest request) throws YarnRemoteException {
+ GetQueueUserAclsInfoRequestProto requestProto =
+ ((GetQueueUserAclsInfoRequestPBImpl)request).getProto();
+ try {
+ return new GetQueueUserAclsInfoResponsePBImpl(
+ proxy.getQueueUserAcls(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java
new file mode 100644
index 0000000..ffb03d1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java
@@ -0,0 +1,92 @@
+package org.apache.hadoop.yarn.api.impl.pb.client;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainerRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainerResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerResponsePBImpl;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
+import org.apache.hadoop.yarn.proto.ContainerManager.ContainerManagerService;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerRequestProto;
+
+import com.google.protobuf.ServiceException;
+
+public class ContainerManagerPBClientImpl implements ContainerManager {
+
+ private ContainerManagerService.BlockingInterface proxy;
+
+ public ContainerManagerPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
+ RPC.setProtocolEngine(conf, ContainerManagerService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
+ proxy = (ContainerManagerService.BlockingInterface)RPC.getProxy(
+ ContainerManagerService.BlockingInterface.class, clientVersion, addr, conf);
+ }
+
+ @Override
+ public GetContainerStatusResponse getContainerStatus(
+ GetContainerStatusRequest request) throws YarnRemoteException {
+ GetContainerStatusRequestProto requestProto = ((GetContainerStatusRequestPBImpl)request).getProto();
+ try {
+ return new GetContainerStatusResponsePBImpl(proxy.getContainerStatus(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public StartContainerResponse startContainer(StartContainerRequest request)
+ throws YarnRemoteException {
+ StartContainerRequestProto requestProto = ((StartContainerRequestPBImpl)request).getProto();
+ try {
+ return new StartContainerResponsePBImpl(proxy.startContainer(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public StopContainerResponse stopContainer(StopContainerRequest request)
+ throws YarnRemoteException {
+ StopContainerRequestProto requestProto = ((StopContainerRequestPBImpl)request).getProto();
+ try {
+ return new StopContainerResponsePBImpl(proxy.stopContainer(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java
new file mode 100644
index 0000000..7ead9a7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java
@@ -0,0 +1,70 @@
+package org.apache.hadoop.yarn.api.impl.pb.service;
+
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.proto.AMRMProtocol.AMRMProtocolService.BlockingInterface;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProto;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class AMRMProtocolPBServiceImpl implements BlockingInterface {
+
+ private AMRMProtocol real;
+
+ public AMRMProtocolPBServiceImpl(AMRMProtocol impl) {
+ this.real = impl;
+ }
+
+ @Override
+ public AllocateResponseProto allocate(RpcController arg0,
+ AllocateRequestProto proto) throws ServiceException {
+ AllocateRequestPBImpl request = new AllocateRequestPBImpl(proto);
+ try {
+ AllocateResponse response = real.allocate(request);
+ return ((AllocateResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public FinishApplicationMasterResponseProto finishApplicationMaster(
+ RpcController arg0, FinishApplicationMasterRequestProto proto)
+ throws ServiceException {
+ FinishApplicationMasterRequestPBImpl request = new FinishApplicationMasterRequestPBImpl(proto);
+ try {
+ FinishApplicationMasterResponse response = real.finishApplicationMaster(request);
+ return ((FinishApplicationMasterResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RegisterApplicationMasterResponseProto registerApplicationMaster(
+ RpcController arg0, RegisterApplicationMasterRequestProto proto)
+ throws ServiceException {
+ RegisterApplicationMasterRequestPBImpl request = new RegisterApplicationMasterRequestPBImpl(proto);
+ try {
+ RegisterApplicationMasterResponse response = real.registerApplicationMaster(request);
+ return ((RegisterApplicationMasterResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+}
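
The service impl is the mirror of the client impl: it adapts the generated BlockingInterface onto a real AMRMProtocol, turning protos into records on the way in and YarnRemoteException into ServiceException on the way out. A wiring sketch; ApplicationMasterService is a hypothetical server-side implementation and the RPC server registration itself is elided:

AMRMProtocol real = new ApplicationMasterService(); // hypothetical implementation
AMRMProtocolService.BlockingInterface pbService =
    new AMRMProtocolPBServiceImpl(real);
// pbService is then handed to the RPC server configured with
// ProtoOverHadoopRpcEngine; incoming protos are forwarded to 'real'
// as records, and its YarnRemoteExceptions travel back as ServiceExceptions.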
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
new file mode 100644
index 0000000..de62305
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
@@ -0,0 +1,179 @@
+package org.apache.hadoop.yarn.api.impl.pb.service;
+
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllApplicationsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllApplicationsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationIdRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationIdResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService.BlockingInterface;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class ClientRMProtocolPBServiceImpl implements BlockingInterface {
+
+ private ClientRMProtocol real;
+
+ public ClientRMProtocolPBServiceImpl(ClientRMProtocol impl) {
+ this.real = impl;
+ }
+
+ @Override
+ public FinishApplicationResponseProto finishApplication(RpcController arg0,
+ FinishApplicationRequestProto proto) throws ServiceException {
+ FinishApplicationRequestPBImpl request = new FinishApplicationRequestPBImpl(proto);
+ try {
+ FinishApplicationResponse response = real.finishApplication(request);
+ return ((FinishApplicationResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetApplicationReportResponseProto getApplicationReport(
+ RpcController arg0, GetApplicationReportRequestProto proto)
+ throws ServiceException {
+ GetApplicationReportRequestPBImpl request = new GetApplicationReportRequestPBImpl(proto);
+ try {
+ GetApplicationReportResponse response = real.getApplicationReport(request);
+ return ((GetApplicationReportResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetClusterMetricsResponseProto getClusterMetrics(RpcController arg0,
+ GetClusterMetricsRequestProto proto) throws ServiceException {
+ GetClusterMetricsRequestPBImpl request = new GetClusterMetricsRequestPBImpl(proto);
+ try {
+ GetClusterMetricsResponse response = real.getClusterMetrics(request);
+ return ((GetClusterMetricsResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetNewApplicationIdResponseProto getNewApplicationId(
+ RpcController arg0, GetNewApplicationIdRequestProto proto)
+ throws ServiceException {
+ GetNewApplicationIdRequestPBImpl request = new GetNewApplicationIdRequestPBImpl(proto);
+ try {
+ GetNewApplicationIdResponse response = real.getNewApplicationId(request);
+ return ((GetNewApplicationIdResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public SubmitApplicationResponseProto submitApplication(RpcController arg0,
+ SubmitApplicationRequestProto proto) throws ServiceException {
+ SubmitApplicationRequestPBImpl request = new SubmitApplicationRequestPBImpl(proto);
+ try {
+ SubmitApplicationResponse response = real.submitApplication(request);
+ return ((SubmitApplicationResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetAllApplicationsResponseProto getAllApplications(
+ RpcController controller, GetAllApplicationsRequestProto proto)
+ throws ServiceException {
+ GetAllApplicationsRequestPBImpl request =
+ new GetAllApplicationsRequestPBImpl(proto);
+ try {
+ GetAllApplicationsResponse response = real.getAllApplications(request);
+ return ((GetAllApplicationsResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetClusterNodesResponseProto getClusterNodes(RpcController controller,
+ GetClusterNodesRequestProto proto) throws ServiceException {
+ GetClusterNodesRequestPBImpl request =
+ new GetClusterNodesRequestPBImpl(proto);
+ try {
+ GetClusterNodesResponse response = real.getClusterNodes(request);
+ return ((GetClusterNodesResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetQueueInfoResponseProto getQueueInfo(RpcController controller,
+ GetQueueInfoRequestProto proto) throws ServiceException {
+ GetQueueInfoRequestPBImpl request =
+ new GetQueueInfoRequestPBImpl(proto);
+ try {
+ GetQueueInfoResponse response = real.getQueueInfo(request);
+ return ((GetQueueInfoResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetQueueUserAclsInfoResponseProto getQueueUserAcls(
+ RpcController controller, GetQueueUserAclsInfoRequestProto proto)
+ throws ServiceException {
+ GetQueueUserAclsInfoRequestPBImpl request =
+ new GetQueueUserAclsInfoRequestPBImpl(proto);
+ try {
+ GetQueueUserAclsInfoResponse response = real.getQueueUserAcls(request);
+ return ((GetQueueUserAclsInfoResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java
new file mode 100644
index 0000000..67dd924
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java
@@ -0,0 +1,69 @@
+package org.apache.hadoop.yarn.api.impl.pb.service;
+
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainerRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainerResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerResponsePBImpl;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.proto.ContainerManager.ContainerManagerService.BlockingInterface;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerResponseProto;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class ContainerManagerPBServiceImpl implements BlockingInterface {
+
+ private ContainerManager real;
+
+ public ContainerManagerPBServiceImpl(ContainerManager impl) {
+ this.real = impl;
+ }
+
+ @Override
+ public GetContainerStatusResponseProto getContainerStatus(RpcController arg0,
+ GetContainerStatusRequestProto proto) throws ServiceException {
+ GetContainerStatusRequestPBImpl request = new GetContainerStatusRequestPBImpl(proto);
+ try {
+ GetContainerStatusResponse response = real.getContainerStatus(request);
+ return ((GetContainerStatusResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public StartContainerResponseProto startContainer(RpcController arg0,
+ StartContainerRequestProto proto) throws ServiceException {
+ StartContainerRequestPBImpl request = new StartContainerRequestPBImpl(proto);
+ try {
+ StartContainerResponse response = real.startContainer(request);
+ return ((StartContainerResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public StopContainerResponseProto stopContainer(RpcController arg0,
+ StopContainerRequestProto proto) throws ServiceException {
+ StopContainerRequestPBImpl request = new StopContainerRequestPBImpl(proto);
+ try {
+ StopContainerResponse response = real.stopContainer(request);
+ return ((StopContainerResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
new file mode 100644
index 0000000..2c80adf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -0,0 +1,96 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.conf;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Splitter;
+import java.util.Iterator;
+import org.apache.hadoop.conf.Configuration;
+
+public class YarnConfiguration extends Configuration {
+ private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults();
+ private static final Joiner JOINER = Joiner.on("");
+
+ public static final String RM_PREFIX = "yarn.server.resourcemanager.";
+
+ public static final String SCHEDULER_ADDRESS = RM_PREFIX
+ + "scheduler.address";
+
+ public static final String AM_EXPIRY_INTERVAL = RM_PREFIX
+ + "application.expiry.interval";
+
+ public static final String DEFAULT_SCHEDULER_BIND_ADDRESS = "0.0.0.0:8030";
+
+ public static final String APPSMANAGER_ADDRESS = RM_PREFIX
+ + "appsManager.address";
+
+ public static final String YARN_SECURITY_INFO =
+ "yarn.security.info.class.name";
+
+ public static final String DEFAULT_APPSMANAGER_BIND_ADDRESS =
+ "0.0.0.0:8040";
+
+ private static final String YARN_DEFAULT_XML_FILE = "yarn-default.xml";
+ private static final String YARN_SITE_XML_FILE = "yarn-site.xml";
+
+ public static final String APPLICATION_MANAGER_PRINCIPAL =
+ "yarn.jobmanager.user-name";
+
+ public static final String RM_WEBAPP_BIND_ADDRESS = RM_PREFIX
+ + "webapp.address";
+
+ public static final String DEFAULT_RM_WEBAPP_BIND_ADDRESS = "0.0.0.0:8088";
+
+ static {
+ Configuration.addDefaultResource(YARN_DEFAULT_XML_FILE);
+ Configuration.addDefaultResource(YARN_SITE_XML_FILE);
+ }
+
+ public static final String RM_SERVER_PRINCIPAL_KEY =
+ "yarn.resourcemanager.principal";
+
+ public static final String APPLICATION_ACL_VIEW_APP =
+ "application.acl-view-job";
+
+ public static final String APPLICATION_ACL_MODIFY_APP =
+ "application.acl-modify-job";
+
+ public YarnConfiguration() {
+ super();
+ }
+
+ public YarnConfiguration(Configuration conf) {
+ super(conf);
+ if (!(conf instanceof YarnConfiguration)) {
+ this.reloadConfiguration();
+ }
+ }
+
+ public static String getRMWebAppURL(Configuration conf) {
+ String addr = conf.get(RM_WEBAPP_BIND_ADDRESS,
+ DEFAULT_RM_WEBAPP_BIND_ADDRESS);
+ Iterator<String> it = ADDR_SPLITTER.split(addr).iterator();
+ it.next(); // ignore the bind host
+ String port = it.next();
+ // Use apps manager address to figure out the host for webapp
+ addr = conf.get(APPSMANAGER_ADDRESS, DEFAULT_APPSMANAGER_BIND_ADDRESS);
+ String host = ADDR_SPLITTER.split(addr).iterator().next();
+ return JOINER.join("http://", host, ":", port, "/");
+ }
+}
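
Note that getRMWebAppURL deliberately mixes two settings: the port is taken from the webapp bind address (which typically binds 0.0.0.0) while the host comes from the apps manager address. A worked example with an illustrative host name:

YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_WEBAPP_BIND_ADDRESS, "0.0.0.0:8088");
conf.set(YarnConfiguration.APPSMANAGER_ADDRESS, "rm.example.com:8040");

// Port 8088 from the webapp bind address, host from the apps manager
// address: yields "http://rm.example.com:8088/".
String url = YarnConfiguration.getRMWebAppURL(conf);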
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AbstractEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AbstractEvent.java
new file mode 100644
index 0000000..be07ec3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AbstractEvent.java
@@ -0,0 +1,57 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.event;
+
+/**
+ * Parent class of all events; every concrete event extends this class.
+ */
+public abstract class AbstractEvent<TYPE extends Enum<TYPE>>
+ implements Event<TYPE> {
+
+ private final TYPE type;
+ private final long timestamp;
+
+ // use this if you DON'T care about the timestamp
+ public AbstractEvent(TYPE type) {
+ this.type = type;
+ // We're not generating a real timestamp here. It's too expensive.
+ timestamp = -1L;
+ }
+
+ // use this if you care about the timestamp
+ public AbstractEvent(TYPE type, long timestamp) {
+ this.type = type;
+ this.timestamp = timestamp;
+ }
+
+ @Override
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ @Override
+ public TYPE getType() {
+ return type;
+ }
+
+ @Override
+ public String toString() {
+ return "EventType: " + getType();
+ }
+}
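
Concrete events pair an enum of event types with a subclass of AbstractEvent; the enum's declaring class is what handlers later register against in the dispatcher. A sketch, where JobEventType and JobEvent are illustrative:

// Hypothetical event type and event built on AbstractEvent.
public enum JobEventType {
  JOB_INIT,
  JOB_KILL
}

public class JobEvent extends AbstractEvent<JobEventType> {
  private final String jobId;

  public JobEvent(String jobId, JobEventType type) {
    super(type);
    this.jobId = jobId;
  }

  public String getJobId() {
    return jobId;
  }
}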
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
new file mode 100644
index 0000000..28b9582
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.event;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+/**
+ * Dispatches events in a separate thread. Currently a single thread does
+ * all the dispatching. Potentially there could be one channel per event-type
+ * class, with a thread pool dispatching the events.
+ */
+@SuppressWarnings("rawtypes")
+public class AsyncDispatcher extends AbstractService implements Dispatcher {
+
+ private static final Log LOG = LogFactory.getLog(AsyncDispatcher.class);
+
+ private final BlockingQueue<Event> eventQueue;
+ private volatile boolean stopped = false;
+
+ private Thread eventHandlingThread;
+ protected final Map<Class<? extends Enum>, EventHandler> eventDispatchers;
+
+ public AsyncDispatcher() {
+ this(new HashMap<Class<? extends Enum>, EventHandler>(),
+ new LinkedBlockingQueue<Event>());
+ }
+
+ AsyncDispatcher(
+ Map<Class<? extends Enum>, EventHandler> eventDispatchers,
+ BlockingQueue<Event> eventQueue) {
+ super("Dispatcher");
+ this.eventQueue = eventQueue;
+ this.eventDispatchers = eventDispatchers;
+ }
+
+ Runnable createThread() {
+ return new Runnable() {
+ @Override
+ public void run() {
+ while (!stopped && !Thread.currentThread().isInterrupted()) {
+ Event event;
+ try {
+ event = eventQueue.take();
+ } catch(InterruptedException ie) {
+ LOG.info("AsyncDispatcher thread interrupted", ie);
+ return;
+ }
+ if (event != null) {
+ dispatch(event);
+ }
+ }
+ }
+ };
+ }
+
+ @Override
+ public void start() {
+ //start all the components
+ super.start();
+ eventHandlingThread = new Thread(createThread());
+ eventHandlingThread.start();
+ }
+
+ @Override
+ public void stop() {
+ stopped = true;
+ eventHandlingThread.interrupt();
+ try {
+ eventHandlingThread.join();
+ } catch (InterruptedException ie) {
+ LOG.debug("Interruped Exception while stopping", ie);
+ }
+
+ //stop all the components
+ super.stop();
+ }
+
+ @SuppressWarnings("unchecked")
+ protected void dispatch(Event event) {
+ // all events go through this method
+ LOG.debug("Dispatching the event " + event.getClass().getName() + "."
+ + event.toString());
+
+ Class<? extends Enum> type = event.getType().getDeclaringClass();
+
+ try{
+ eventDispatchers.get(type).handle(event);
+ }
+ catch (Throwable t) {
+ //TODO Maybe log the state of the queue
+ LOG.fatal("Error in dispatcher thread. Exiting..", t);
+ System.exit(-1);
+ }
+ }
+
+ @Override
+ @SuppressWarnings("rawtypes")
+ public void register(Class<? extends Enum> eventType,
+ EventHandler handler) {
+ /* check to see if we have a listener registered */
+ @SuppressWarnings("unchecked")
+ EventHandler<Event> registeredHandler = (EventHandler<Event>)
+ eventDispatchers.get(eventType);
+ LOG.info("Registering " + eventType + " for " + handler.getClass());
+ if (registeredHandler == null) {
+ eventDispatchers.put(eventType, handler);
+ } else if (!(registeredHandler instanceof MultiListenerHandler)){
+ /* for multiple listeners of an event add the multiple listener handler */
+ MultiListenerHandler multiHandler = new MultiListenerHandler();
+ multiHandler.addHandler(registeredHandler);
+ multiHandler.addHandler(handler);
+ eventDispatchers.put(eventType, multiHandler);
+ } else {
+ /* already a multilistener, just add to it */
+ MultiListenerHandler multiHandler
+ = (MultiListenerHandler) registeredHandler;
+ multiHandler.addHandler(handler);
+ }
+ }
+
+ @Override
+ public EventHandler getEventHandler() {
+ return new GenericEventHandler();
+ }
+
+ class GenericEventHandler implements EventHandler<Event> {
+ public void handle(Event event) {
+ /* all this method does is enqueue all the events onto the queue */
+ int qSize = eventQueue.size();
+ if (qSize != 0 && qSize % 1000 == 0) {
+ LOG.info("Size of event-queue is " + qSize);
+ }
+ int remCapacity = eventQueue.remainingCapacity();
+ if (remCapacity < 1000) {
+ LOG.info("Very low remaining capacity in the event-queue: "
+ + remCapacity);
+ }
+ try {
+ eventQueue.put(event);
+ } catch (InterruptedException e) {
+ throw new YarnException(e);
+ }
+ }
+ }
+
+ /**
+ * Multiplexes an event, sending it to every handler that has
+ * registered an interest in the event.
+ * @param <T> the type of event these multiple handlers are interested in.
+ */
+ @SuppressWarnings("rawtypes")
+ static class MultiListenerHandler implements EventHandler<Event> {
+ List<EventHandler<Event>> listOfHandlers;
+
+ public MultiListenerHandler() {
+ listOfHandlers = new ArrayList<EventHandler<Event>>();
+ }
+
+ @Override
+ public void handle(Event event) {
+ for (EventHandler<Event> handler: listOfHandlers) {
+ handler.handle(event);
+ }
+ }
+
+ void addHandler(EventHandler<Event> handler) {
+ listOfHandlers.add(handler);
+ }
+
+ }
+}
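A minimal usage sketch of the dispatcher above, assuming a hypothetical JobEvent/JobEventType pair that follows the Event and EventHandler contracts defined below; all names in the sketch are invented for illustration:

    // Hypothetical event type and event, for illustration only.
    enum JobEventType { JOB_START, JOB_FINISH }

    class JobEvent implements Event<JobEventType> {
      private final JobEventType type;
      private final long timestamp = System.currentTimeMillis();
      JobEvent(JobEventType type) { this.type = type; }
      public JobEventType getType() { return type; }
      public long getTimestamp() { return timestamp; }
    }

    AsyncDispatcher dispatcher = new AsyncDispatcher();
    // Route every event whose type enum is JobEventType to this handler.
    dispatcher.register(JobEventType.class, new EventHandler<JobEvent>() {
      public void handle(JobEvent event) {
        System.out.println("Handling " + event.getType());
      }
    });
    dispatcher.start();
    // getEventHandler() returns the enqueueing handler; the dispatcher
    // thread drains the queue and dispatches on getDeclaringClass().
    dispatcher.getEventHandler().handle(new JobEvent(JobEventType.JOB_START));
    dispatcher.stop();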
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
new file mode 100644
index 0000000..40c0775
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
@@ -0,0 +1,32 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.event;
+
+/**
+ * Event Dispatcher interface. It dispatches events to registered
+ * event handlers based on event types.
+ *
+ */
+public interface Dispatcher {
+
+ EventHandler getEventHandler();
+
+ void register(Class<? extends Enum> eventType, EventHandler handler);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Event.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Event.java
new file mode 100644
index 0000000..f0eb07a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Event.java
@@ -0,0 +1,30 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.event;
+
+/**
+ * Interface defining the events API.
+ *
+ */
+public interface Event<TYPE extends Enum<TYPE>> {
+
+ TYPE getType();
+ long getTimestamp();
+ String toString();
+}
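Concrete events usually share the type-storage and timestamping boilerplate; a hedged sketch of a reusable base class (AbstractEvent is an assumption here, not part of this patch):

    package org.apache.hadoop.yarn.event;

    // Sketch: records the construction-time timestamp and stores the enum
    // type, so subclasses only need to pass their type constant.
    public abstract class AbstractEvent<TYPE extends Enum<TYPE>>
        implements Event<TYPE> {

      private final TYPE type;
      private final long timestamp;

      public AbstractEvent(TYPE type) {
        this.type = type;
        this.timestamp = System.currentTimeMillis();
      }

      @Override
      public TYPE getType() {
        return type;
      }

      @Override
      public long getTimestamp() {
        return timestamp;
      }

      @Override
      public String toString() {
        return "EventType: " + getType();
      }
    }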
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventHandler.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventHandler.java
new file mode 100644
index 0000000..a9df2cd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventHandler.java
@@ -0,0 +1,30 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.event;
+
+/**
+ * Interface for handling events of type T
+ *
+ * @param <T> parameterized event of type T
+ */
+public interface EventHandler<T extends Event> {
+
+ void handle(T event);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java
new file mode 100644
index 0000000..c86efdc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java
@@ -0,0 +1,8 @@
+package org.apache.hadoop.yarn.factories;
+
+import org.apache.hadoop.yarn.YarnException;
+
+
+public interface RecordFactory {
+ public <T> T newRecordInstance(Class<T> clazz) throws YarnException;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RpcClientFactory.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RpcClientFactory.java
new file mode 100644
index 0000000..8fa3793
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RpcClientFactory.java
@@ -0,0 +1,12 @@
+package org.apache.hadoop.yarn.factories;
+
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.YarnException;
+
+public interface RpcClientFactory {
+
+ public Object getClient(Class<?> protocol, long clientVersion, InetSocketAddress addr, Configuration conf) throws YarnException;
+
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RpcServerFactory.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RpcServerFactory.java
new file mode 100644
index 0000000..8bad78e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RpcServerFactory.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.factories;
+
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC.Server;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.YarnException;
+
+public interface RpcServerFactory {
+
+ public Server getServer(Class<?> protocol, Object instance,
+ InetSocketAddress addr, Configuration conf,
+ SecretManager<? extends TokenIdentifier> secretManager,
+ int numHandlers)
+ throws YarnException;
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/YarnRemoteExceptionFactory.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/YarnRemoteExceptionFactory.java
new file mode 100644
index 0000000..085da8d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/YarnRemoteExceptionFactory.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.factories;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+
+public interface YarnRemoteExceptionFactory {
+ public YarnRemoteException createYarnRemoteException(String message);
+ public YarnRemoteException createYarnRemoteException(String message, Throwable t);
+ public YarnRemoteException createYarnRemoteException(Throwable t);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RecordFactoryPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RecordFactoryPBImpl.java
new file mode 100644
index 0000000..5cfec03
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RecordFactoryPBImpl.java
@@ -0,0 +1,95 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.factories.impl.pb;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+
+public class RecordFactoryPBImpl implements RecordFactory {
+
+ private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb";
+ private static final String PB_IMPL_CLASS_SUFFIX = "PBImpl";
+
+ private static final RecordFactoryPBImpl self = new RecordFactoryPBImpl();
+ private Configuration localConf = new Configuration();
+ private ConcurrentMap<Class<?>, Constructor<?>> cache = new ConcurrentHashMap<Class<?>, Constructor<?>>();
+
+ private RecordFactoryPBImpl() {
+ }
+
+ public static RecordFactory get() {
+ return self;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <T> T newRecordInstance(Class<T> clazz) throws YarnException {
+
+ Constructor<?> constructor = cache.get(clazz);
+ if (constructor == null) {
+ Class<?> pbClazz = null;
+ try {
+ pbClazz = localConf.getClassByName(getPBImplClassName(clazz));
+ } catch (ClassNotFoundException e) {
+ throw new YarnException("Failed to load class: ["
+ + getPBImplClassName(clazz) + "]", e);
+ }
+ try {
+ constructor = pbClazz.getConstructor();
+ constructor.setAccessible(true);
+ cache.putIfAbsent(clazz, constructor);
+ } catch (NoSuchMethodException e) {
+ throw new YarnException("Could not find 0 argument constructor", e);
+ }
+ }
+ try {
+ Object retObject = constructor.newInstance();
+ return (T)retObject;
+ } catch (InvocationTargetException e) {
+ throw new YarnException(e);
+ } catch (IllegalAccessException e) {
+ throw new YarnException(e);
+ } catch (InstantiationException e) {
+ throw new YarnException(e);
+ }
+ }
+
+ private String getPBImplClassName(Class<?> clazz) {
+ String srcPackagePart = getPackageName(clazz);
+ String srcClassName = getClassName(clazz);
+ String destPackagePart = srcPackagePart + "." + PB_IMPL_PACKAGE_SUFFIX;
+ String destClassPart = srcClassName + PB_IMPL_CLASS_SUFFIX;
+ return destPackagePart + "." + destClassPart;
+ }
+
+ private String getClassName(Class<?> clazz) {
+ String fqName = clazz.getName();
+ return fqName.substring(fqName.lastIndexOf('.') + 1);
+ }
+
+ private String getPackageName(Class<?> clazz) {
+ return clazz.getPackage().getName();
+ }
+}
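A short usage sketch of the factory above. The mapping is purely by naming convention, so any record interface laid out as <package>.impl.pb.<Class>PBImpl works; ApplicationId is used here as a representative record name:

    // For an interface org.apache.hadoop.yarn.api.records.ApplicationId,
    // getPBImplClassName() resolves to
    // org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl,
    // which must provide a no-argument constructor.
    RecordFactory factory = RecordFactoryPBImpl.get();
    ApplicationId appId = factory.newRecordInstance(ApplicationId.class);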
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
new file mode 100644
index 0000000..e6567ce
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -0,0 +1,96 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.factories.impl.pb;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.net.InetSocketAddress;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factories.RpcClientFactory;
+
+public class RpcClientFactoryPBImpl implements RpcClientFactory {
+
+ private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb.client";
+ private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
+
+ private static final RpcClientFactoryPBImpl self = new RpcClientFactoryPBImpl();
+ private Configuration localConf = new Configuration();
+ private ConcurrentMap<Class<?>, Constructor<?>> cache = new ConcurrentHashMap<Class<?>, Constructor<?>>();
+
+ public static RpcClientFactoryPBImpl get() {
+ return RpcClientFactoryPBImpl.self;
+ }
+
+ private RpcClientFactoryPBImpl() {
+ }
+
+ @Override
+ public Object getClient(Class<?> protocol, long clientVersion, InetSocketAddress addr, Configuration conf) throws YarnException {
+
+ Constructor<?> constructor = cache.get(protocol);
+ if (constructor == null) {
+ Class<?> pbClazz = null;
+ try {
+ pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+ } catch (ClassNotFoundException e) {
+ throw new YarnException("Failed to load class: ["
+ + getPBImplClassName(protocol) + "]", e);
+ }
+ try {
+ constructor = pbClazz.getConstructor(Long.TYPE, InetSocketAddress.class, Configuration.class);
+ constructor.setAccessible(true);
+ cache.putIfAbsent(protocol, constructor);
+ } catch (NoSuchMethodException e) {
+ throw new YarnException("Could not find constructor with params: " + Long.TYPE + ", " + InetSocketAddress.class + ", " + Configuration.class, e);
+ }
+ }
+ try {
+ Object retObject = constructor.newInstance(clientVersion, addr, conf);
+ return retObject;
+ } catch (InvocationTargetException e) {
+ throw new YarnException(e);
+ } catch (IllegalAccessException e) {
+ throw new YarnException(e);
+ } catch (InstantiationException e) {
+ throw new YarnException(e);
+ }
+ }
+
+
+ private String getPBImplClassName(Class<?> clazz) {
+ String srcPackagePart = getPackageName(clazz);
+ String srcClassName = getClassName(clazz);
+ String destPackagePart = srcPackagePart + "." + PB_IMPL_PACKAGE_SUFFIX;
+ String destClassPart = srcClassName + PB_IMPL_CLASS_SUFFIX;
+ return destPackagePart + "." + destClassPart;
+ }
+
+ private String getClassName(Class<?> clazz) {
+ String fqName = clazz.getName();
+ return fqName.substring(fqName.lastIndexOf('.') + 1);
+ }
+
+ private String getPackageName(Class<?> clazz) {
+ return clazz.getPackage().getName();
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
new file mode 100644
index 0000000..afb3d7e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -0,0 +1,160 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.factories.impl.pb;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.InetSocketAddress;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RPC.Server;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factories.RpcServerFactory;
+import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
+
+import com.google.protobuf.BlockingService;
+
+public class RpcServerFactoryPBImpl implements RpcServerFactory {
+
+ private static final String PROTO_GEN_PACKAGE_NAME = "org.apache.hadoop.yarn.proto";
+ private static final String PROTO_GEN_CLASS_SUFFIX = "Service";
+ private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb.service";
+ private static final String PB_IMPL_CLASS_SUFFIX = "PBServiceImpl";
+
+ private static final RpcServerFactoryPBImpl self = new RpcServerFactoryPBImpl();
+
+ private Configuration localConf = new Configuration();
+ private ConcurrentMap<Class<?>, Constructor<?>> serviceCache = new ConcurrentHashMap<Class<?>, Constructor<?>>();
+ private ConcurrentMap<Class<?>, Method> protoCache = new ConcurrentHashMap<Class<?>, Method>();
+
+ public static RpcServerFactoryPBImpl get() {
+ return RpcServerFactoryPBImpl.self;
+ }
+
+ private RpcServerFactoryPBImpl() {
+ }
+
+ @Override
+ public Server getServer(Class<?> protocol, Object instance,
+ InetSocketAddress addr, Configuration conf,
+ SecretManager<? extends TokenIdentifier> secretManager, int numHandlers)
+ throws YarnException {
+
+ Constructor<?> constructor = serviceCache.get(protocol);
+ if (constructor == null) {
+ Class<?> pbServiceImplClazz = null;
+ try {
+ pbServiceImplClazz = localConf
+ .getClassByName(getPbServiceImplClassName(protocol));
+ } catch (ClassNotFoundException e) {
+ throw new YarnException("Failed to load class: ["
+ + getPbServiceImplClassName(protocol) + "]", e);
+ }
+ try {
+ constructor = pbServiceImplClazz.getConstructor(protocol);
+ constructor.setAccessible(true);
+ serviceCache.putIfAbsent(protocol, constructor);
+ } catch (NoSuchMethodException e) {
+ throw new YarnException("Could not find constructor with params: "
+ + Long.TYPE + ", " + InetSocketAddress.class + ", "
+ + Configuration.class, e);
+ }
+ }
+
+ Object service = null;
+ try {
+ service = constructor.newInstance(instance);
+ } catch (InvocationTargetException e) {
+ throw new YarnException(e);
+ } catch (IllegalAccessException e) {
+ throw new YarnException(e);
+ } catch (InstantiationException e) {
+ throw new YarnException(e);
+ }
+
+ Method method = protoCache.get(protocol);
+ if (method == null) {
+ Class<?> protoClazz = null;
+ try {
+ protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+ } catch (ClassNotFoundException e) {
+ throw new YarnException("Failed to load class: ["
+ + getProtoClassName(protocol) + "]", e);
+ }
+ try {
+ method = protoClazz.getMethod("newReflectiveBlockingService", service.getClass().getInterfaces()[0]);
+ method.setAccessible(true);
+ protoCache.putIfAbsent(protocol, method);
+ } catch (NoSuchMethodException e) {
+ throw new YarnException(e);
+ }
+ }
+
+ try {
+ return createServer(addr, conf, secretManager, numHandlers,
+ (BlockingService)method.invoke(null, service));
+ } catch (InvocationTargetException e) {
+ throw new YarnException(e);
+ } catch (IllegalAccessException e) {
+ throw new YarnException(e);
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+
+ private String getProtoClassName(Class<?> clazz) {
+ String srcClassName = getClassName(clazz);
+ return PROTO_GEN_PACKAGE_NAME + "." + srcClassName + "$" + srcClassName + PROTO_GEN_CLASS_SUFFIX;
+ }
+
+ private String getPbServiceImplClassName(Class<?> clazz) {
+ String srcPackagePart = getPackageName(clazz);
+ String srcClassName = getClassName(clazz);
+ String destPackagePart = srcPackagePart + "." + PB_IMPL_PACKAGE_SUFFIX;
+ String destClassPart = srcClassName + PB_IMPL_CLASS_SUFFIX;
+ return destPackagePart + "." + destClassPart;
+ }
+
+ private String getClassName(Class<?> clazz) {
+ String fqName = clazz.getName();
+ return fqName.substring(fqName.lastIndexOf('.') + 1);
+ }
+
+ private String getPackageName(Class<?> clazz) {
+ return clazz.getPackage().getName();
+ }
+
+ private Server createServer(InetSocketAddress addr, Configuration conf,
+ SecretManager<? extends TokenIdentifier> secretManager, int numHandlers,
+ BlockingService blockingService) throws IOException {
+ RPC.setProtocolEngine(conf, BlockingService.class, ProtoOverHadoopRpcEngine.class);
+ Server server = RPC.getServer(BlockingService.class, blockingService,
+ addr.getHostName(), addr.getPort(), numHandlers, false, conf,
+ secretManager);
+ return server;
+ }
+}
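To make the reflective wiring of the two RPC factories concrete, here is the naming contract worked through for a hypothetical protocol org.example.Foo; all names below are derived from the lookup code above:

    // Client side (RpcClientFactoryPBImpl):
    //   looks up  org.example.impl.pb.client.FooPBClientImpl
    //   requiring constructor (long clientVersion,
    //                          InetSocketAddress addr, Configuration conf)
    //
    // Server side (RpcServerFactoryPBImpl):
    //   looks up  org.example.impl.pb.service.FooPBServiceImpl
    //   requiring constructor (Foo instance)
    //   plus the protobuf-generated class
    //             org.apache.hadoop.yarn.proto.Foo$FooService
    //   whose static newReflectiveBlockingService(...) wraps the service
    //   implementation into the BlockingService served over RPC.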
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/YarnRemoteExceptionFactoryPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/YarnRemoteExceptionFactoryPBImpl.java
new file mode 100644
index 0000000..857c001
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/YarnRemoteExceptionFactoryPBImpl.java
@@ -0,0 +1,52 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.factories.impl.pb;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
+import org.apache.hadoop.yarn.factories.YarnRemoteExceptionFactory;
+
+public class YarnRemoteExceptionFactoryPBImpl implements
+ YarnRemoteExceptionFactory {
+
+ private static final YarnRemoteExceptionFactory self = new YarnRemoteExceptionFactoryPBImpl();
+
+ private YarnRemoteExceptionFactoryPBImpl() {
+ }
+
+ public static YarnRemoteExceptionFactory get() {
+ return self;
+ }
+
+ @Override
+ public YarnRemoteException createYarnRemoteException(String message) {
+ return new YarnRemoteExceptionPBImpl(message);
+ }
+
+ @Override
+ public YarnRemoteException createYarnRemoteException(String message,
+ Throwable t) {
+ return new YarnRemoteExceptionPBImpl(message, t);
+ }
+
+ @Override
+ public YarnRemoteException createYarnRemoteException(Throwable t) {
+ return new YarnRemoteExceptionPBImpl(t);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java
new file mode 100644
index 0000000..7fb7e69
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java
@@ -0,0 +1,62 @@
+package org.apache.hadoop.yarn.factory.providers;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
+
+public class RecordFactoryProvider {
+
+ public static final String RPC_SERIALIZER_KEY = "org.apache.yarn.ipc.rpc.serializer.property";
+ public static final String RPC_SERIALIZER_DEFAULT = "protocolbuffers";
+
+ public static final String RECORD_FACTORY_CLASS_KEY = "org.apache.yarn.ipc.record.factory.class";
+
+ private static Configuration defaultConf;
+
+ static {
+ defaultConf = new Configuration();
+ }
+
+ private RecordFactoryProvider() {
+ }
+
+ public static RecordFactory getRecordFactory(Configuration conf) {
+ if (conf == null) {
+ //Assuming the default configuration has the correct factories set.
+ //Users can specify a particular factory by providing a configuration.
+ conf = defaultConf;
+ }
+ String recordFactoryClassName = conf.get(RECORD_FACTORY_CLASS_KEY);
+ if (recordFactoryClassName == null) {
+ String serializer = conf.get(RPC_SERIALIZER_KEY, RPC_SERIALIZER_DEFAULT);
+ if (serializer.equals(RPC_SERIALIZER_DEFAULT)) {
+ return RecordFactoryPBImpl.get();
+ } else {
+ throw new YarnException("Unknown serializer: [" + conf.get(RPC_SERIALIZER_KEY) + "]. Use keys: [" + RECORD_FACTORY_CLASS_KEY + "] to specify Record factory");
+ }
+ } else {
+ return (RecordFactory) getFactoryClassInstance(recordFactoryClassName);
+ }
+ }
+
+ private static Object getFactoryClassInstance(String factoryClassName) {
+ try {
+ Class<?> clazz = Class.forName(factoryClassName);
+ Method method = clazz.getMethod("get");
+ method.setAccessible(true);
+ return method.invoke(null);
+ } catch (ClassNotFoundException e) {
+ throw new YarnException(e);
+ } catch (NoSuchMethodException e) {
+ throw new YarnException(e);
+ } catch (InvocationTargetException e) {
+ throw new YarnException(e);
+ } catch (IllegalAccessException e) {
+ throw new YarnException(e);
+ }
+ }
+}
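A usage sketch for the provider, assuming a hypothetical custom factory class org.example.MyRecordFactory that exposes a public static get() method:

    Configuration conf = new Configuration();
    // Default serializer is "protocolbuffers", so this returns
    // RecordFactoryPBImpl.get().
    RecordFactory pb = RecordFactoryProvider.getRecordFactory(conf);

    // Plugging in a custom factory by class name; the named class must
    // expose a public static get() method returning the factory instance.
    conf.set(RecordFactoryProvider.RECORD_FACTORY_CLASS_KEY,
        "org.example.MyRecordFactory");
    RecordFactory custom = RecordFactoryProvider.getRecordFactory(conf);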
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RpcFactoryProvider.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RpcFactoryProvider.java
new file mode 100644
index 0000000..9300581
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RpcFactoryProvider.java
@@ -0,0 +1,78 @@
+package org.apache.hadoop.yarn.factory.providers;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factories.RpcClientFactory;
+import org.apache.hadoop.yarn.factories.RpcServerFactory;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
+
+/**
+ * A public static get() method must be present in the Client/Server Factory implementation.
+ */
+public class RpcFactoryProvider {
+ private static final Log LOG = LogFactory.getLog(RpcFactoryProvider.class);
+ //TODO Move these keys to CommonConfigurationKeys
+ public static final String RPC_SERIALIZER_KEY = "org.apache.yarn.ipc.rpc.serializer.property";
+ public static final String RPC_SERIALIZER_DEFAULT = "protocolbuffers";
+
+ public static final String RPC_CLIENT_FACTORY_CLASS_KEY = "org.apache.yarn.ipc.client.factory.class";
+ public static final String RPC_SERVER_FACTORY_CLASS_KEY = "org.apache.yarn.ipc.server.factory.class";
+
+ private RpcFactoryProvider() {
+ }
+
+ public static RpcServerFactory getServerFactory(Configuration conf) {
+ if (conf == null) {
+ conf = new Configuration();
+ }
+ String serverFactoryClassName = conf.get(RPC_SERVER_FACTORY_CLASS_KEY);
+ if (serverFactoryClassName == null) {
+ if (conf.get(RPC_SERIALIZER_KEY, RPC_SERIALIZER_DEFAULT).equals(RPC_SERIALIZER_DEFAULT)) {
+ return RpcServerFactoryPBImpl.get();
+ } else {
+ throw new YarnException("Unknown serializer: [" + conf.get(RPC_SERIALIZER_KEY) + "]. Use keys: [" + RPC_CLIENT_FACTORY_CLASS_KEY + "][" + RPC_SERVER_FACTORY_CLASS_KEY + "] to specify factories");
+ }
+ } else {
+ return (RpcServerFactory) getFactoryClassInstance(serverFactoryClassName);
+ }
+ }
+
+ public static RpcClientFactory getClientFactory(Configuration conf) {
+ String clientFactoryClassName = conf.get(RPC_CLIENT_FACTORY_CLASS_KEY);
+ if (clientFactoryClassName == null) {
+ if (conf.get(RPC_SERIALIZER_KEY, RPC_SERIALIZER_DEFAULT).equals(RPC_SERIALIZER_DEFAULT)) {
+ return RpcClientFactoryPBImpl.get();
+ } else {
+ throw new YarnException("Unknown serializer: [" + conf.get(RPC_SERIALIZER_KEY) + "]. Use keys: [" + RPC_CLIENT_FACTORY_CLASS_KEY + "][" + RPC_SERVER_FACTORY_CLASS_KEY + "] to specify factories");
+ }
+ } else {
+ return(RpcClientFactory) getFactoryClassInstance(clientFactoryClassName);
+ }
+ }
+
+ private static Object getFactoryClassInstance(String factoryClassName) {
+ try {
+ Class<?> clazz = Class.forName(factoryClassName);
+ Method method = clazz.getMethod("get");
+ method.setAccessible(true);
+ return method.invoke(null);
+ } catch (ClassNotFoundException e) {
+ throw new YarnException(e);
+ } catch (NoSuchMethodException e) {
+ throw new YarnException(e);
+ } catch (InvocationTargetException e) {
+ throw new YarnException(e);
+ } catch (IllegalAccessException e) {
+ throw new YarnException(e);
+ }
+ }
+
+}
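Because the provider instantiates factories through a static get() method, a replacement server factory only needs to follow that shape. A minimal hypothetical sketch (org.example.MyRpcServerFactory is invented for illustration):

    package org.example;

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.RPC.Server;
    import org.apache.hadoop.security.token.SecretManager;
    import org.apache.hadoop.security.token.TokenIdentifier;
    import org.apache.hadoop.yarn.YarnException;
    import org.apache.hadoop.yarn.factories.RpcServerFactory;

    public class MyRpcServerFactory implements RpcServerFactory {

      private static final MyRpcServerFactory self = new MyRpcServerFactory();

      // Required by RpcFactoryProvider.getFactoryClassInstance().
      public static MyRpcServerFactory get() {
        return self;
      }

      @Override
      public Server getServer(Class<?> protocol, Object instance,
          InetSocketAddress addr, Configuration conf,
          SecretManager<? extends TokenIdentifier> secretManager,
          int numHandlers) throws YarnException {
        // A real implementation would construct and return an RPC server.
        throw new YarnException("Not implemented in this sketch");
      }
    }

    // Selected via:
    // conf.set(RpcFactoryProvider.RPC_SERVER_FACTORY_CLASS_KEY,
    //          "org.example.MyRpcServerFactory");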
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/YarnRemoteExceptionFactoryProvider.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/YarnRemoteExceptionFactoryProvider.java
new file mode 100644
index 0000000..5aaf9f3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/YarnRemoteExceptionFactoryProvider.java
@@ -0,0 +1,54 @@
+package org.apache.hadoop.yarn.factory.providers;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factories.YarnRemoteExceptionFactory;
+import org.apache.hadoop.yarn.factories.impl.pb.YarnRemoteExceptionFactoryPBImpl;
+
+public class YarnRemoteExceptionFactoryProvider {
+
+ public static final String RPC_SERIALIZER_KEY = "org.apache.yarn.ipc.rpc.serializer.property";
+ public static final String RPC_SERIALIZER_DEFAULT = "protocolbuffers";
+
+ public static final String EXCEPTION_FACTORY_CLASS_KEY = "org.apache.yarn.ipc.exception.factory.class";
+
+ private YarnRemoteExceptionFactoryProvider() {
+ }
+
+ public static YarnRemoteExceptionFactory getYarnRemoteExceptionFactory(Configuration conf) {
+ if (conf == null) {
+ conf = new Configuration();
+ }
+ String recordFactoryClassName = conf.get(EXCEPTION_FACTORY_CLASS_KEY);
+ if (recordFactoryClassName == null) {
+ String serializer = conf.get(RPC_SERIALIZER_KEY, RPC_SERIALIZER_DEFAULT);
+ if (serializer.equals(RPC_SERIALIZER_DEFAULT)) {
+ return YarnRemoteExceptionFactoryPBImpl.get();
+ } else {
+ throw new YarnException("Unknown serializer: [" + conf.get(RPC_SERIALIZER_KEY) + "]. Use keys: [" + EXCEPTION_FACTORY_CLASS_KEY + "] to specify Exception factory");
+ }
+ } else {
+ return (YarnRemoteExceptionFactory) getFactoryClassInstance(recordFactoryClassName);
+ }
+ }
+
+ private static Object getFactoryClassInstance(String factoryClassName) {
+ try {
+ Class<?> clazz = Class.forName(factoryClassName);
+ Method method = clazz.getMethod("get");
+ method.setAccessible(true);
+ return method.invoke(null);
+ } catch (ClassNotFoundException e) {
+ throw new YarnException(e);
+ } catch (NoSuchMethodException e) {
+ throw new YarnException(e);
+ } catch (InvocationTargetException e) {
+ throw new YarnException(e);
+ } catch (IllegalAccessException e) {
+ throw new YarnException(e);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/AvroYarnRPC.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/AvroYarnRPC.java
new file mode 100644
index 0000000..0f8881f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/AvroYarnRPC.java
@@ -0,0 +1,63 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.ipc;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.avro.ipc.Server;
+import org.apache.avro.ipc.SocketServer;
+import org.apache.avro.ipc.SocketTransceiver;
+import org.apache.avro.specific.SpecificRequestor;
+import org.apache.avro.specific.SpecificResponder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.YarnException;
+
+/*
+ * This uses Avro's simple socket-based RPC. It can be replaced with a
+ * Netty-based transport once Yarn is upgraded to Avro 1.4.
+ */
+public class AvroYarnRPC extends YarnRPC {
+
+ @Override
+ public Object getProxy(Class protocol,
+ InetSocketAddress addr, Configuration conf) {
+ try {
+ return SpecificRequestor.getClient(protocol, new SocketTransceiver(addr));
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+
+ @Override
+ public Server getServer(Class protocol, Object instance,
+ InetSocketAddress addr, Configuration conf,
+ SecretManager<? extends TokenIdentifier> secretManager,
+ int numHandlers) {
+ try {
+ return new SocketServer(new SpecificResponder(protocol, instance),
+ addr);
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
new file mode 100644
index 0000000..ab90f7a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
@@ -0,0 +1,78 @@
+package org.apache.hadoop.yarn.ipc;
+
+import java.net.InetSocketAddress;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factory.providers.RpcFactoryProvider;
+
+/**
+ * This uses Hadoop RPC, tunneling Protocol Buffers messages through
+ * ProtoOverHadoopRpcEngine over a Hadoop connection.
+ * This does not give cross-language wire compatibility, since the Hadoop
+ * RPC wire format is non-standard, but it does permit use of Protocol Buffers
+ * protocol versioning features for inter-Java RPCs.
+ */
+public class HadoopYarnProtoRPC extends YarnRPC {
+
+ private static final Log LOG = LogFactory.getLog(HadoopYarnProtoRPC.class);
+
+ @Override
+ public Object getProxy(Class protocol, InetSocketAddress addr,
+ Configuration conf) {
+ Configuration myConf = new Configuration(conf);
+ LOG.info("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
+ LOG.debug("Configured SecurityInfo class name is "
+ + myConf.get(YarnConfiguration.YARN_SECURITY_INFO));
+
+ return RpcFactoryProvider.getClientFactory(myConf).getClient(protocol, 1, addr, myConf);
+ }
+
+ @Override
+ public Server getServer(Class protocol, Object instance,
+ InetSocketAddress addr, Configuration conf,
+ SecretManager<? extends TokenIdentifier> secretManager,
+ int numHandlers) {
+ LOG.info("Creating a HadoopYarnProtoRpc server for protocol " + protocol +
+ " with " + numHandlers + " handlers");
+ LOG.info("Configured SecurityInfo class name is "
+ + conf.get(YarnConfiguration.YARN_SECURITY_INFO));
+
+ final RPC.Server hadoopServer;
+ hadoopServer =
+ RpcFactoryProvider.getServerFactory(conf).getServer(protocol, instance,
+ addr, conf, secretManager, numHandlers);
+
+ Server server = new Server() {
+ @Override
+ public void close() {
+ hadoopServer.stop();
+ }
+
+ @Override
+ public int getPort() {
+ return hadoopServer.getListenerAddress().getPort();
+ }
+
+ @Override
+ public void join() throws InterruptedException {
+ hadoopServer.join();
+ }
+
+ @Override
+ public void start() {
+ hadoopServer.start();
+ }
+ };
+ return server;
+
+ }
+
+}
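A hedged end-to-end sketch using this class directly; MyProtocol and MyProtocolImpl are hypothetical stand-ins for a real PB protocol interface and its implementation:

    Configuration conf = new Configuration();
    YarnRPC rpc = new HadoopYarnProtoRPC();
    InetSocketAddress addr = new InetSocketAddress("localhost", 8030);

    // Server side: the instance is wrapped via RpcServerFactoryPBImpl.
    Server server = rpc.getServer(MyProtocol.class, new MyProtocolImpl(),
        addr, conf, null, 1);
    server.start();

    // Client side: a dynamic proxy created via RpcClientFactoryPBImpl.
    MyProtocol client = (MyProtocol) rpc.getProxy(MyProtocol.class, addr, conf);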
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnRPC.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnRPC.java
new file mode 100644
index 0000000..3358189
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnRPC.java
@@ -0,0 +1,103 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.ipc;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.AvroSpecificRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * This uses Hadoop RPC, tunneling AvroSpecificRpcEngine over a
+ * Hadoop connection.
+ * This does not give cross-language wire compatibility, since the Hadoop
+ * RPC wire format is non-standard, but it does permit use of Avro's protocol
+ * versioning features for inter-Java RPCs.
+ */
+public class HadoopYarnRPC extends YarnRPC {
+
+ private static final Log LOG = LogFactory.getLog(HadoopYarnRPC.class);
+
+ @Override
+ public Object getProxy(Class protocol, InetSocketAddress addr,
+ Configuration conf) {
+ Configuration myConf = new Configuration(conf);
+ LOG.info("Creating a HadoopYarnRpc proxy for protocol " + protocol);
+ LOG.debug("Configured SecurityInfo class name is "
+ + myConf.get(YarnConfiguration.YARN_SECURITY_INFO));
+ RPC.setProtocolEngine(myConf, protocol, AvroSpecificRpcEngine.class);
+ try {
+ return RPC.getProxy(protocol, 1, addr, myConf);
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+
+ @Override
+ public Server getServer(Class protocol, Object instance,
+ InetSocketAddress addr, Configuration conf,
+ SecretManager<? extends TokenIdentifier> secretManager,
+ int numHandlers) {
+ LOG.info("Creating a HadoopYarnRpc server for protocol " + protocol +
+ " with " + numHandlers + " handlers");
+ LOG.info("Configured SecurityInfo class name is "
+ + conf.get(YarnConfiguration.YARN_SECURITY_INFO));
+ RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
+ final RPC.Server hadoopServer;
+ try {
+ hadoopServer = RPC.getServer(protocol, instance, addr.getHostName(),
+ addr.getPort(), numHandlers, false, conf, secretManager);
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ Server server = new Server() {
+ @Override
+ public void close() {
+ hadoopServer.stop();
+ }
+
+ @Override
+ public int getPort() {
+ return hadoopServer.getListenerAddress().getPort();
+ }
+
+ @Override
+ public void join() throws InterruptedException {
+ hadoopServer.join();
+ }
+
+ @Override
+ public void start() {
+ hadoopServer.start();
+ }
+ };
+ return server;
+
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
new file mode 100644
index 0000000..399a275
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
@@ -0,0 +1,386 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.ipc;
+
+import java.io.Closeable;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.InetSocketAddress;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import javax.net.SocketFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtocolProxy;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcEngine;
+import org.apache.hadoop.ipc.ClientCache;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
+import org.apache.hadoop.yarn.ipc.RpcProtos.ProtoSpecificRpcRequest;
+import org.apache.hadoop.yarn.ipc.RpcProtos.ProtoSpecificRpcResponse;
+
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceStability.Evolving
+public class ProtoOverHadoopRpcEngine implements RpcEngine {
+ private static final Log LOG = LogFactory.getLog(RPC.class);
+
+ private static final ClientCache CLIENTS = new ClientCache();
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
+ InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+ SocketFactory factory, int rpcTimeout) throws IOException {
+
+ return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(protocol
+ .getClassLoader(), new Class[] { protocol }, new Invoker(protocol,
+ addr, ticket, conf, factory, rpcTimeout)), false);
+ }
+
+ @Override
+ public void stopProxy(Object proxy) {
+ try {
+ ((Invoker) Proxy.getInvocationHandler(proxy)).close();
+ } catch (IOException e) {
+ LOG.warn("Error while stopping " + proxy, e);
+ }
+ }
+
+ private static class Invoker implements InvocationHandler, Closeable {
+ private Map<String, Message> returnTypes = new ConcurrentHashMap<String, Message>();
+ private boolean isClosed = false;
+ private Client.ConnectionId remoteId;
+ private Client client;
+
+ public Invoker(Class<?> protocol, InetSocketAddress addr,
+ UserGroupInformation ticket, Configuration conf, SocketFactory factory,
+ int rpcTimeout) throws IOException {
+ this.remoteId = Client.ConnectionId.getConnectionId(addr, protocol,
+ ticket, rpcTimeout, conf);
+ this.client = CLIENTS.getClient(conf, factory,
+ ProtoSpecificResponseWritable.class);
+ }
+
+ private ProtoSpecificRpcRequest constructRpcRequest(Method method,
+ Object[] params) throws ServiceException {
+ ProtoSpecificRpcRequest rpcRequest;
+ ProtoSpecificRpcRequest.Builder builder;
+
+ builder = ProtoSpecificRpcRequest.newBuilder();
+ builder.setMethodName(method.getName());
+
+ if (params.length != 2) { // RpcController + Message
+ throw new ServiceException("Too many parameters for request. Method: ["
+ + method.getName() + "]" + ", Expected: 2, Actual: "
+ + params.length);
+ }
+ if (params[1] == null) {
+ throw new ServiceException("null param while calling Method: ["
+ + method.getName() + "]");
+ }
+
+ Message param = (Message) params[1];
+ builder.setRequestProto(param.toByteString());
+
+ rpcRequest = builder.build();
+ return rpcRequest;
+ }
+
+ @Override
+ public Object invoke(Object proxy, Method method, Object[] args)
+ throws Throwable {
+ long startTime = 0;
+ if (LOG.isDebugEnabled()) {
+ startTime = System.currentTimeMillis();
+ }
+
+ ProtoSpecificRpcRequest rpcRequest = constructRpcRequest(method, args);
+ ProtoSpecificResponseWritable val = null;
+ try {
+ val = (ProtoSpecificResponseWritable) client.call(
+ new ProtoSpecificRequestWritable(rpcRequest), remoteId);
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ }
+
+ ProtoSpecificRpcResponse response = val.message;
+
+ if (LOG.isDebugEnabled()) {
+ long callTime = System.currentTimeMillis() - startTime;
+ LOG.debug("Call: " + method.getName() + " " + callTime);
+ }
+
+ if (response.hasIsError() && response.getIsError()) {
+ YarnRemoteExceptionPBImpl exception = new YarnRemoteExceptionPBImpl(response.getException());
+ exception.fillInStackTrace();
+ ServiceException se = new ServiceException(exception);
+ throw se;
+ }
+
+ Message prototype = null;
+ try {
+ prototype = getReturnProtoType(method);
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ }
+ Message actualReturnMessage = prototype.newBuilderForType()
+ .mergeFrom(response.getResponseProto()).build();
+ return actualReturnMessage;
+ }
+
+ public void close() throws IOException {
+ if (!isClosed) {
+ isClosed = true;
+ CLIENTS.stopClient(client);
+ }
+ }
+
+ private Message getReturnProtoType(Method method) throws Exception {
+ if (returnTypes.containsKey(method.getName())) {
+ return returnTypes.get(method.getName());
+ } else {
+ Class<?> returnType = method.getReturnType();
+
+ Method newInstMethod = returnType.getMethod("getDefaultInstance");
+ newInstMethod.setAccessible(true);
+ Message prototype = (Message) newInstMethod.invoke(null,
+ (Object[]) null);
+ returnTypes.put(method.getName(), prototype);
+ return prototype;
+ }
+ }
+ }
+
+ /**
+ * Writable Wrapper for Protocol Buffer Requests
+ */
+ private static class ProtoSpecificRequestWritable implements Writable {
+ ProtoSpecificRpcRequest message;
+
+ @SuppressWarnings("unused")
+ public ProtoSpecificRequestWritable() {
+ }
+
+ ProtoSpecificRequestWritable(ProtoSpecificRpcRequest message) {
+ this.message = message;
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(message.toByteArray().length);
+ out.write(message.toByteArray());
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ int length = in.readInt();
+ byte[] bytes = new byte[length];
+ in.readFully(bytes);
+ message = ProtoSpecificRpcRequest.parseFrom(bytes);
+ }
+ }
+
+ /**
+ * Writable Wrapper for Protocol Buffer Responses
+ */
+ public static class ProtoSpecificResponseWritable implements Writable {
+ ProtoSpecificRpcResponse message;
+
+ public ProtoSpecificResponseWritable() {
+ }
+
+ public ProtoSpecificResponseWritable(ProtoSpecificRpcResponse message) {
+ this.message = message;
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(message.toByteArray().length);
+ out.write(message.toByteArray());
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ int length = in.readInt();
+ byte[] bytes = new byte[length];
+ in.readFully(bytes);
+ message = ProtoSpecificRpcResponse.parseFrom(bytes);
+ }
+ }
+
+ @Override
+ public Object[] call(Method method, Object[][] params,
+ InetSocketAddress[] addrs, UserGroupInformation ticket, Configuration conf)
+ throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ // for unit testing only
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ static Client getClient(Configuration conf) {
+ return CLIENTS.getClient(conf, SocketFactory.getDefault(),
+ ProtoSpecificResponseWritable.class);
+ }
+
+ public static class Server extends RPC.Server {
+
+ private BlockingService service;
+ private boolean verbose;
+
+ private static String classNameBase(String className) {
+ String[] names = className.split("\\.", -1);
+ if (names == null || names.length == 0) {
+ return className;
+ }
+ return names[names.length - 1];
+ }
+
+ /**
+ * Construct an RPC server.
+ *
+ * @param instance
+ * the instance whose methods will be called
+ * @param conf
+ * the configuration to use
+ * @param bindAddress
+ * the address to bind on to listen for connection
+ * @param port
+ * the port to listen for connections on
+ * @param numHandlers
+ * the number of method handler threads to run
+ * @param numReaders
+ * the number of reader threads to run
+ * @param queueSizePerHandler
+ * the size of the call queue per handler thread
+ * @param verbose
+ * whether each call should be logged
+ * @param secretManager
+ * the secret manager to validate client tokens, or null
+ */
+ public Server(Object instance, Configuration conf, String bindAddress,
+ int port, int numHandlers, int numReaders,
+ int queueSizePerHandler, boolean verbose,
+ SecretManager<? extends TokenIdentifier> secretManager)
+ throws IOException {
+ super(bindAddress, port, ProtoSpecificRequestWritable.class, numHandlers,
+ numReaders, queueSizePerHandler, conf, classNameBase(instance.getClass().getName()), secretManager);
+ this.service = (BlockingService) instance;
+ this.verbose = verbose;
+ }
+
+ @Override
+ public Writable call(Class<?> protocol, Writable writableRequest,
+ long receiveTime) throws IOException {
+ ProtoSpecificRequestWritable request = (ProtoSpecificRequestWritable) writableRequest;
+ ProtoSpecificRpcRequest rpcRequest = request.message;
+ String methodName = rpcRequest.getMethodName();
+ System.out.println("Call: protocol=" + protocol.getCanonicalName() + ", method="
+ + methodName);
+ if (verbose)
+ log("Call: protocol=" + protocol.getCanonicalName() + ", method="
+ + methodName);
+ MethodDescriptor methodDescriptor = service.getDescriptorForType()
+ .findMethodByName(methodName);
+ Message prototype = service.getRequestPrototype(methodDescriptor);
+ Message param = prototype.newBuilderForType()
+ .mergeFrom(rpcRequest.getRequestProto()).build();
+ Message result;
+ try {
+ result = service.callBlockingMethod(methodDescriptor, null, param);
+ } catch (ServiceException e) {
+ LOG.warn("Exception while invoking method " + methodName, e);
+ return handleException(e);
+ } catch (Exception e) {
+ return handleException(e);
+ }
+
+ ProtoSpecificRpcResponse response = constructProtoSpecificRpcSuccessResponse(result);
+ return new ProtoSpecificResponseWritable(response);
+ }
+
+ private ProtoSpecificResponseWritable handleException(Throwable e) {
+ ProtoSpecificRpcResponse.Builder builder = ProtoSpecificRpcResponse
+ .newBuilder();
+ builder.setIsError(true);
+ if (e.getCause() instanceof YarnRemoteExceptionPBImpl) {
+ builder.setException(((YarnRemoteExceptionPBImpl) e.getCause())
+ .getProto());
+ } else {
+ builder.setException(new YarnRemoteExceptionPBImpl(e).getProto());
+ }
+ ProtoSpecificRpcResponse response = builder.build();
+ return new ProtoSpecificResponseWritable(response);
+ }
+
+ private ProtoSpecificRpcResponse constructProtoSpecificRpcSuccessResponse(
+ Message message) {
+ ProtoSpecificRpcResponse res = ProtoSpecificRpcResponse.newBuilder()
+ .setResponseProto(message.toByteString()).build();
+ return res;
+ }
+ }
+
+ private static void log(String value) {
+ if (value != null && value.length() > 55)
+ value = value.substring(0, 55) + "...";
+ LOG.info(value);
+ }
+
+ @Override
+ public RPC.Server getServer(Class<?> protocol, Object instance,
+ String bindAddress, int port, int numHandlers,int numReaders,
+ int queueSizePerHandler, boolean verbose,
+ Configuration conf, SecretManager<? extends TokenIdentifier> secretManager)
+ throws IOException {
+ return new Server(instance, conf, bindAddress, port, numHandlers, numReaders, queueSizePerHandler,
+ verbose, secretManager);
+ }
+}
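For orientation, a short sketch of how this engine is selected for a protocol (normally done inside the PB factories earlier in this patch); MyProtocol is hypothetical:

    Configuration conf = new Configuration();
    RPC.setProtocolEngine(conf, MyProtocol.class,
        ProtoOverHadoopRpcEngine.class);
    // From here on, calls for MyProtocol are marshalled as
    // ProtoSpecificRpcRequest/ProtoSpecificRpcResponse messages, each
    // framed on the wire as a 4-byte length followed by the serialized
    // protobuf bytes (see the Writable wrappers above).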
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java
new file mode 100644
index 0000000..798af12
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java
@@ -0,0 +1,55 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.ipc;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.YarnRemoteExceptionFactory;
+import org.apache.hadoop.yarn.factory.providers.YarnRemoteExceptionFactoryProvider;
+
+public class RPCUtil {
+
+ /**
+   * Relies on the default factory configuration being set correctly
+   * for the default Configuration.
+ */
+ private static Configuration conf = new Configuration();
+ private static YarnRemoteExceptionFactory exceptionFactory = YarnRemoteExceptionFactoryProvider.getYarnRemoteExceptionFactory(conf);
+
+ /**
+   * Returns a serializable YarnRemoteException wrapping the given cause.
+ */
+ public static YarnRemoteException getRemoteException(Throwable t) {
+ return exceptionFactory.createYarnRemoteException(t);
+ }
+
+ /**
+   * Returns a serializable YarnRemoteException with the given message.
+ */
+ public static YarnRemoteException getRemoteException(String message) {
+ return exceptionFactory.createYarnRemoteException(message);
+ }
+
+ public static String toString(YarnRemoteException e) {
+ return (e.getMessage() == null ? "" : e.getMessage()) +
+ (e.getRemoteTrace() == null ? "" : "\n StackTrace: " + e.getRemoteTrace()) +
+ (e.getCause() == null ? "" : "\n Caused by: " + toString(e.getCause()));
+ }
+}
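
RPCUtil is the server-side bridge between arbitrary Throwables and the serializable exception hierarchy. A minimal usage sketch of the intended pattern; doLocalWork() is a hypothetical helper, not part of this patch.

    // Hedged sketch: wrap failures as serializable YarnRemoteExceptions
    // at the RPC boundary.
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
    import org.apache.hadoop.yarn.ipc.RPCUtil;

    public class ExampleFacade {
      public void doRemoteCall() throws YarnRemoteException {
        try {
          doLocalWork();
        } catch (Exception e) {
          // Converted here so the client sees a serializable exception.
          throw RPCUtil.getRemoteException(e);
        }
      }
      private void doLocalWork() throws Exception { /* ... */ }
    }
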
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
new file mode 100644
index 0000000..8b476d8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
@@ -0,0 +1,65 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.ipc;
+
+import java.net.InetSocketAddress;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.YarnException;
+
+/**
+ * Abstraction to get the RPC implementation for Yarn.
+ */
+public abstract class YarnRPC {
+ private static final Log LOG = LogFactory.getLog(YarnRPC.class);
+
+ public static final String RPC_CLASSNAME
+ = "org.apache.hadoop.yarn.ipc.YarnRPC.classname";
+
+  // use Hadoop RPC as the default
+ public static final String DEFAULT_RPC
+ = "org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC";
+
+  public abstract Object getProxy(Class<?> protocol, InetSocketAddress addr,
+ Configuration conf);
+
+  public abstract Server getServer(Class<?> protocol, Object instance,
+ InetSocketAddress addr, Configuration conf,
+ SecretManager<? extends TokenIdentifier> secretManager,
+ int numHandlers);
+
+ public static YarnRPC create(Configuration conf) {
+ LOG.info("Creating YarnRPC for " + conf.get(RPC_CLASSNAME));
+ String clazzName = conf.get(RPC_CLASSNAME);
+ if (clazzName == null) {
+ clazzName = DEFAULT_RPC;
+ }
+ try {
+ return (YarnRPC) Class.forName(clazzName).newInstance();
+ } catch (Exception e) {
+ throw new YarnException(e);
+ }
+ }
+
+}
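
Client code never names the RPC implementation directly; it asks the factory, which consults RPC_CLASSNAME and falls back to DEFAULT_RPC. A minimal client-side sketch; MyProtocol stands in for a real protocol interface and is not part of this patch.

    // Hedged sketch: obtain the configured YarnRPC and build a client proxy.
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.ipc.YarnRPC;

    public class RpcClientSketch {
      interface MyProtocol { } // hypothetical protocol interface

      public static Object connect(String host, int port) {
        Configuration conf = new Configuration();
        // With RPC_CLASSNAME unset, create() falls back to DEFAULT_RPC.
        YarnRPC rpc = YarnRPC.create(conf);
        return rpc.getProxy(MyProtocol.class,
            new InetSocketAddress(host, port), conf);
      }
    }
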
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenIdentifier.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenIdentifier.java
new file mode 100644
index 0000000..5dbdc04
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenIdentifier.java
@@ -0,0 +1,77 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+// TODO: Make it avro-ish. TokenIdentifier really isn't serialized
+// as a Writable but simply uses the readFields method in SaslRpcServer
+// for deserialization.
+public class ApplicationTokenIdentifier extends TokenIdentifier {
+
+ public static final Text KIND_NAME = new Text("YARN_APPLICATION_TOKEN");
+
+ private Text appId;
+
+  // TODO: Add more information to the tokenID such that it is not
+  // transferable, is more secure, etc.
+
+ public ApplicationTokenIdentifier(ApplicationId id) {
+ this.appId = new Text(Integer.toString(id.getId()));
+ }
+
+ public ApplicationTokenIdentifier() {
+ this.appId = new Text();
+ }
+
+ public Text getApplicationID() {
+ return appId;
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ appId.write(out);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ appId.readFields(in);
+ }
+
+ @Override
+ public Text getKind() {
+ return KIND_NAME;
+ }
+
+ @Override
+ public UserGroupInformation getUser() {
+ if (appId == null || "".equals(appId.toString())) {
+ return null;
+ }
+ return UserGroupInformation.createRemoteUser(appId.toString());
+ }
+
+}
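
Since the SASL layer rebuilds the identifier purely via readFields (per the TODO above), a quick Writable round trip shows the contract. A hedged sketch using only Hadoop's DataOutputBuffer and DataInputBuffer:

    // Hedged sketch: serialize and deserialize the identifier the way
    // the SASL layer does.
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;

    public class TokenIdRoundTrip {
      public static void main(String[] args) throws Exception {
        ApplicationTokenIdentifier original = new ApplicationTokenIdentifier();
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        ApplicationTokenIdentifier copy = new ApplicationTokenIdentifier();
        copy.readFields(in);
        System.out.println("kind=" + copy.getKind()
            + " appId='" + copy.getApplicationID() + "'");
      }
    }
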
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSecretManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSecretManager.java
new file mode 100644
index 0000000..0d83f40
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSecretManager.java
@@ -0,0 +1,78 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security;
+
+import javax.crypto.SecretKey;
+
+import org.apache.hadoop.security.token.SecretManager;
+
+public class ApplicationTokenSecretManager extends
+ SecretManager<ApplicationTokenIdentifier> {
+
+ // TODO: mark as final
+  private SecretKey masterKey; // For now only one masterKey, forever.
+
+  // TODO: add expiry for masterKey
+  // TODO: add logic to handle multiple masterKeys, only one being used for
+  // creating new tokens at any time.
+  // TODO: Make the masterKey more secure, non-transferable, etc.
+
+ /**
+ * Default constructor
+ */
+ public ApplicationTokenSecretManager() {
+ this.masterKey = generateSecret();
+ }
+
+ // TODO: this should go away.
+ public void setMasterKey(SecretKey mk) {
+ this.masterKey = mk;
+ }
+
+ // TODO: this should go away.
+ public SecretKey getMasterKey() {
+ return masterKey;
+ }
+
+ /**
+ * Convert the byte[] to a secret key
+ * @param key the byte[] to create the secret key from
+ * @return the secret key
+ */
+ public static SecretKey createSecretKey(byte[] key) {
+ return SecretManager.createSecretKey(key);
+ }
+
+ @Override
+ public byte[] createPassword(ApplicationTokenIdentifier identifier) {
+ return createPassword(identifier.getBytes(), masterKey);
+ }
+
+ @Override
+ public byte[] retrievePassword(ApplicationTokenIdentifier identifier)
+ throws SecretManager.InvalidToken {
+ return createPassword(identifier.getBytes(), masterKey);
+ }
+
+ @Override
+ public ApplicationTokenIdentifier createIdentifier() {
+ return new ApplicationTokenIdentifier();
+ }
+
+}
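
Because createPassword and retrievePassword both compute the same HMAC of the identifier bytes with the single masterKey, any party holding the key can verify a token minted elsewhere. A hedged sketch of that symmetry, sharing the key via the setter the TODO says should eventually go away:

    // Hedged sketch: one manager mints a password, another verifies it,
    // after sharing the single master key.
    import java.util.Arrays;
    import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
    import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;

    public class MasterKeySketch {
      public static void main(String[] args) throws Exception {
        ApplicationTokenSecretManager minter =
            new ApplicationTokenSecretManager();
        ApplicationTokenSecretManager verifier =
            new ApplicationTokenSecretManager();
        verifier.setMasterKey(minter.getMasterKey());

        ApplicationTokenIdentifier id = new ApplicationTokenIdentifier();
        byte[] password = minter.createPassword(id);
        System.out.println("verified="
            + Arrays.equals(password, verifier.retrievePassword(id)));
      }
    }
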
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSelector.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSelector.java
new file mode 100644
index 0000000..083d9ad
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSelector.java
@@ -0,0 +1,54 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security;
+
+import java.util.Collection;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+
+public class ApplicationTokenSelector implements
+ TokenSelector<ApplicationTokenIdentifier> {
+
+ private static final Log LOG = LogFactory
+ .getLog(ApplicationTokenSelector.class);
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public Token<ApplicationTokenIdentifier> selectToken(Text service,
+ Collection<Token<? extends TokenIdentifier>> tokens) {
+ if (service == null) {
+ return null;
+ }
+ LOG.info("Looking for a token with service " + service.toString());
+ for (Token<? extends TokenIdentifier> token : tokens) {
+ LOG.info("Token kind is " + token.getKind().toString()
+ + " and the token's service name is " + token.getService());
+ if (ApplicationTokenIdentifier.KIND_NAME.equals(token.getKind())
+ && service.equals(token.getService())) {
+ return (Token<ApplicationTokenIdentifier>) token;
+ }
+ }
+ return null;
+ }
+
+}
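
The selector is what the RPC client consults (via the SecurityInfo classes below) to pick the right credential out of a token collection, matching on kind and service. A small hedged sketch; the service string "rmhost:8030" is illustrative:

    // Hedged sketch: match a token by kind and service, as the RPC layer
    // would when connecting to a service address.
    import java.util.Collections;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;
    import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
    import org.apache.hadoop.yarn.security.ApplicationTokenSelector;

    public class SelectorSketch {
      public static void main(String[] args) {
        Token<ApplicationTokenIdentifier> token =
            new Token<ApplicationTokenIdentifier>(new byte[0], new byte[0],
                ApplicationTokenIdentifier.KIND_NAME, new Text("rmhost:8030"));
        Token<ApplicationTokenIdentifier> picked =
            new ApplicationTokenSelector().selectToken(new Text("rmhost:8030"),
                Collections.<Token<? extends TokenIdentifier>>singletonList(token));
        System.out.println("found=" + (picked != null));
      }
    }
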
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java
new file mode 100644
index 0000000..aaf5ff0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java
@@ -0,0 +1,60 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.yarn.proto.ContainerManager;
+
+public class ContainerManagerSecurityInfo extends SecurityInfo {
+
+ @Override
+ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+ return null;
+ }
+
+ @Override
+ public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+ if (!protocol
+ .equals(ContainerManager.ContainerManagerService.BlockingInterface.class)) {
+ return null;
+ }
+ return new TokenInfo() {
+
+ @Override
+ public Class<? extends Annotation> annotationType() {
+ return null;
+ }
+
+ @Override
+ public Class<? extends TokenSelector<? extends TokenIdentifier>>
+ value() {
+ return ContainerTokenSelector.class;
+ }
+ };
+
+ }
+
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
new file mode 100644
index 0000000..7fa00bd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -0,0 +1,98 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+public class ContainerTokenIdentifier extends TokenIdentifier {
+
+ private static Log LOG = LogFactory
+ .getLog(ContainerTokenIdentifier.class);
+
+ public static final Text KIND = new Text("ContainerToken");
+
+ private ContainerId containerId;
+ private String nmHostName;
+ private Resource resource;
+
+ public ContainerTokenIdentifier(ContainerId containerID, String hostName, Resource r) {
+ this.containerId = containerID;
+ this.nmHostName = hostName;
+ this.resource = r;
+ }
+
+ public ContainerTokenIdentifier() {
+ }
+
+ public ContainerId getContainerID() {
+ return containerId;
+ }
+
+ public String getNmHostName() {
+ return nmHostName;
+ }
+
+ public Resource getResource() {
+ return resource;
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ LOG.debug("Writing ContainerTokenIdentifier to RPC layer");
+ out.writeInt(this.containerId.getAppId().getId());
+ out.writeInt(this.containerId.getId());
+ // TODO: Cluster time-stamp?
+ out.writeUTF(this.nmHostName);
+ out.writeInt(this.resource.getMemory()); // TODO: more resources.
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ this.containerId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ContainerId.class);
+ this.containerId.setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class));
+ this.containerId.getAppId().setId(in.readInt());
+ this.containerId.setId(in.readInt());
+ this.nmHostName = in.readUTF();
+ this.resource = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Resource.class);
+ this.resource.setMemory(in.readInt()); // TODO: more resources.
+ }
+
+ @Override
+ public Text getKind() {
+    return KIND;
+ }
+
+ @Override
+ public UserGroupInformation getUser() {
+ return UserGroupInformation.createRemoteUser(this.containerId.toString());
+ }
+
+}
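
The identifier's fields are YARN records, so constructing one follows the same RecordFactoryProvider pattern that readFields() uses above. A hedged sketch; the ids, host, and memory values are illustrative:

    // Hedged sketch: assemble the records a ContainerTokenIdentifier
    // serializes, mirroring readFields() above.
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
    import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

    public class ContainerTokenSketch {
      public static void main(String[] args) {
        ApplicationId appId = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(ApplicationId.class);
        appId.setId(42);
        ContainerId containerId = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(ContainerId.class);
        containerId.setAppId(appId);
        containerId.setId(1);
        Resource resource = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(Resource.class);
        resource.setMemory(1024);

        ContainerTokenIdentifier id = new ContainerTokenIdentifier(
            containerId, "nm-host:45454", resource);
        System.out.println(id.getKind() + " -> " + id.getContainerID());
      }
    }
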
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java
new file mode 100644
index 0000000..c613ba4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security;
+
+import java.util.Collection;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+
+public class ContainerTokenSelector implements
+ TokenSelector<ContainerTokenIdentifier> {
+
+  @SuppressWarnings("unchecked")
+  @Override
+ public Token<ContainerTokenIdentifier> selectToken(Text service,
+ Collection<Token<? extends TokenIdentifier>> tokens) {
+ if (service == null) {
+ return null;
+ }
+ for (Token<? extends TokenIdentifier> token : tokens) {
+ if (ContainerTokenIdentifier.KIND.equals(token.getKind()) &&
+ service.equals(token.getService())) {
+ return (Token<ContainerTokenIdentifier>) token;
+ }
+ }
+ return null;
+ }
+
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java
new file mode 100644
index 0000000..9f63b5f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java
@@ -0,0 +1,57 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.yarn.proto.AMRMProtocol;
+
+public class SchedulerSecurityInfo extends SecurityInfo {
+
+ @Override
+ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+ return null;
+ }
+
+ @Override
+ public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+ if (!protocol.equals(AMRMProtocol.AMRMProtocolService.BlockingInterface.class)) {
+ return null;
+ }
+ return new TokenInfo() {
+
+ @Override
+ public Class<? extends Annotation> annotationType() {
+ return null;
+ }
+
+ @Override
+ public Class<? extends TokenSelector<? extends TokenIdentifier>>
+ value() {
+ return ApplicationTokenSelector.class;
+ }
+ };
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java
new file mode 100644
index 0000000..90e0855a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java
@@ -0,0 +1,62 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security.client;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.proto.ClientRMProtocol;
+
+public class ClientRMSecurityInfo extends SecurityInfo {
+
+ @Override
+ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+ if (!protocol
+ .equals(ClientRMProtocol.ClientRMProtocolService.BlockingInterface.class)) {
+ return null;
+ }
+ return new KerberosInfo() {
+
+ @Override
+ public Class<? extends Annotation> annotationType() {
+ return null;
+ }
+
+ @Override
+ public String serverPrincipal() {
+ return YarnConfiguration.RM_SERVER_PRINCIPAL_KEY;
+ }
+
+ @Override
+ public String clientPrincipal() {
+ return null;
+ }
+ };
+ }
+
+ @Override
+ public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+ return null;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMSecretManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMSecretManager.java
new file mode 100644
index 0000000..a67cd1e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMSecretManager.java
@@ -0,0 +1,104 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security.client;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.crypto.SecretKey;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
+
+public class ClientToAMSecretManager extends
+ SecretManager<ApplicationTokenIdentifier> {
+
+ private static Log LOG = LogFactory.getLog(ClientToAMSecretManager.class);
+
+ // Per application masterkeys for managing client-tokens
+ private Map<Text, SecretKey> masterKeys = new HashMap<Text, SecretKey>();
+
+  public synchronized void setMasterKey(ApplicationTokenIdentifier identifier,
+      byte[] key) {
+ SecretKey sk = SecretManager.createSecretKey(key);
+ Text applicationID = identifier.getApplicationID();
+ this.masterKeys.put(applicationID, sk);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Setting master key for "
+ + applicationID
+ + " as "
+ + new String(Base64.encodeBase64(this.masterKeys.get(applicationID)
+ .getEncoded())));
+ }
+ }
+
+ private void addMasterKey(ApplicationTokenIdentifier identifier) {
+ Text applicationID = identifier.getApplicationID();
+ this.masterKeys.put(applicationID, generateSecret());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Creating master key for "
+ + applicationID
+ + " as "
+ + new String(Base64.encodeBase64(this.masterKeys.get(applicationID)
+              .getEncoded())));
+    }
+ }
+
+ // TODO: Handle the masterKey invalidation.
+ public synchronized SecretKey getMasterKey(
+ ApplicationTokenIdentifier identifier) {
+ Text applicationID = identifier.getApplicationID();
+ if (!this.masterKeys.containsKey(applicationID)) {
+ addMasterKey(identifier);
+ }
+ return this.masterKeys.get(applicationID);
+ }
+
+ @Override
+ public synchronized byte[] createPassword(
+ ApplicationTokenIdentifier identifier) {
+ byte[] password =
+ createPassword(identifier.getBytes(), getMasterKey(identifier));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Password created is "
+ + new String(Base64.encodeBase64(password)));
+ }
+ return password;
+ }
+
+ @Override
+ public byte[] retrievePassword(ApplicationTokenIdentifier identifier)
+ throws SecretManager.InvalidToken {
+ byte[] password =
+ createPassword(identifier.getBytes(), getMasterKey(identifier));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Password retrieved is "
+ + new String(Base64.encodeBase64(password)));
+ }
+ return password;
+ }
+
+ @Override
+ public ApplicationTokenIdentifier createIdentifier() {
+ return new ApplicationTokenIdentifier();
+ }
+
+}
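
Unlike ApplicationTokenSecretManager's single key, this manager lazily mints one master key per application ID, so client-token passwords are application-scoped. A hedged sketch of that behavior; the application ids are illustrative:

    // Hedged sketch: same app ID -> same password; different app IDs ->
    // different master keys, hence different passwords.
    import java.util.Arrays;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
    import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
    import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;

    public class ClientToAMSketch {
      static ApplicationTokenIdentifier id(int app) {
        ApplicationId appId = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(ApplicationId.class);
        appId.setId(app);
        return new ApplicationTokenIdentifier(appId);
      }

      public static void main(String[] args) throws Exception {
        ClientToAMSecretManager sm = new ClientToAMSecretManager();
        byte[] p1 = sm.createPassword(id(1));
        System.out.println("same app agrees: "
            + Arrays.equals(p1, sm.retrievePassword(id(1))));
        System.out.println("apps differ: "
            + !Arrays.equals(p1, sm.createPassword(id(2))));
      }
    }
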
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/AbstractService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/AbstractService.java
new file mode 100644
index 0000000..55383de
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/AbstractService.java
@@ -0,0 +1,113 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.service;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+public abstract class AbstractService implements Service {
+
+ private static final Log LOG = LogFactory.getLog(AbstractService.class);
+
+ private STATE state = STATE.NOTINITED;
+ private final String name;
+ private long startTime;
+ private Configuration config;
+ private List<ServiceStateChangeListener> listeners =
+ new ArrayList<ServiceStateChangeListener>();
+
+ public AbstractService(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public synchronized STATE getServiceState() {
+ return state;
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+ ensureCurrentState(STATE.NOTINITED);
+ this.config = conf;
+ changeState(STATE.INITED);
+ LOG.info("Service:" + getName() + " is inited.");
+ }
+
+ @Override
+ public synchronized void start() {
+ startTime = System.currentTimeMillis();
+ ensureCurrentState(STATE.INITED);
+ changeState(STATE.STARTED);
+ LOG.info("Service:" + getName() + " is started.");
+ }
+
+ @Override
+ public synchronized void stop() {
+ if (state == STATE.STOPPED) {
+      return; // already stopped
+ }
+ ensureCurrentState(STATE.STARTED);
+ changeState(STATE.STOPPED);
+ LOG.info("Service:" + getName() + " is stopped.");
+ }
+
+ @Override
+ public synchronized void register(ServiceStateChangeListener l) {
+ listeners.add(l);
+ }
+
+ @Override
+ public synchronized void unregister(ServiceStateChangeListener l) {
+ listeners.remove(l);
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public synchronized Configuration getConfig() {
+ return config;
+ }
+
+ @Override
+ public long getStartTime() {
+ return startTime;
+ }
+
+ private void ensureCurrentState(STATE currentState) {
+ if (state != currentState) {
+ throw new IllegalStateException("For this operation, current State must " +
+ "be " + currentState + " instead of " + state);
+ }
+ }
+
+ private void changeState(STATE newState) {
+ state = newState;
+ //notify listeners
+ for (ServiceStateChangeListener l : listeners) {
+ l.stateChanged(this);
+ }
+ }
+}
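
AbstractService gives subclasses the NOTINITED -> INITED -> STARTED -> STOPPED bookkeeping; they wrap their own work around the super calls, which enforce ordering and fire listeners. A minimal hedged subclass sketch:

    // Hedged sketch: a trivial concrete service. init() must precede
    // start(), and start() must precede stop(), or AbstractService
    // throws IllegalStateException.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.service.AbstractService;

    public class EchoService extends AbstractService {
      public EchoService() {
        super("EchoService");
      }

      @Override
      public synchronized void start() {
        // open sockets, spawn threads, etc. here
        super.start(); // flips state to STARTED and notifies listeners
      }

      @Override
      public synchronized void stop() {
        // release resources here
        super.stop();
      }

      public static void main(String[] args) {
        EchoService s = new EchoService();
        s.init(new Configuration());
        s.start();
        System.out.println(s.getName() + " is " + s.getServiceState());
        s.stop();
      }
    }
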
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java
new file mode 100644
index 0000000..973ecc0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.YarnException;
+
+/**
+ * Composition of services.
+ */
+public class CompositeService extends AbstractService {
+
+ private static final Log LOG = LogFactory.getLog(CompositeService.class);
+
+ private List<Service> serviceList = new ArrayList<Service>();
+
+ public CompositeService(String name) {
+ super(name);
+ }
+
+ public Collection<Service> getServices() {
+ return Collections.unmodifiableList(serviceList);
+ }
+
+ protected synchronized void addService(Service service) {
+ serviceList.add(service);
+ }
+
+ protected synchronized boolean removeService(Service service) {
+ return serviceList.remove(service);
+ }
+
+ public synchronized void init(Configuration conf) {
+ for (Service service : serviceList) {
+ service.init(conf);
+ }
+ super.init(conf);
+ }
+
+ public synchronized void start() {
+ int i = 0;
+ try {
+ for (int n = serviceList.size(); i < n; i++) {
+ Service service = serviceList.get(i);
+ service.start();
+ }
+ } catch(Throwable e) {
+ LOG.error("Error starting services " + getName(), e);
+ for (int j = i-1; j >= 0; j--) {
+ Service service = serviceList.get(j);
+ try {
+ service.stop();
+ } catch(Throwable t) {
+ LOG.info("Error stopping " + service.getName(), t);
+ }
+ }
+      throw new YarnException("Failed to start " + getName(), e);
+ }
+ super.start();
+ }
+
+ public synchronized void stop() {
+    // stop in reverse order of start
+ for (int i = serviceList.size() - 1; i >= 0; i--) {
+ Service service = serviceList.get(i);
+ service.stop();
+ }
+ super.stop();
+ }
+
+}
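
CompositeService cascades the lifecycle over its children: init and start run in add order, stop runs in reverse, and a failure during start rolls back the children already started. A hedged composition sketch; the child names are illustrative:

    // Hedged sketch: a parent that owns two children; lifecycle calls
    // cascade automatically.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.service.AbstractService;
    import org.apache.hadoop.yarn.service.CompositeService;

    public class ServerStack extends CompositeService {
      static class Child extends AbstractService {
        Child(String name) { super(name); }
      }

      public ServerStack() {
        super("ServerStack");
        addService(new Child("store"));     // starts first
        addService(new Child("webServer")); // starts second, stops first
      }

      public static void main(String[] args) {
        ServerStack stack = new ServerStack();
        stack.init(new Configuration()); // inits children, then itself
        stack.start();
        stack.stop();                    // stops in reverse order
      }
    }
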
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/FilterService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/FilterService.java
new file mode 100644
index 0000000..314d664
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/FilterService.java
@@ -0,0 +1,76 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.service;
+
+import org.apache.hadoop.conf.Configuration;
+
+public class FilterService implements Service {
+
+ private final Service service;
+ private final long startTime = System.currentTimeMillis();
+
+ public FilterService(Service service) {
+ this.service = service;
+ }
+
+ @Override
+ public void init(Configuration config) {
+ service.init(config);
+ }
+
+ @Override
+ public void start() {
+ service.start();
+ }
+
+ @Override
+ public void stop() {
+ service.stop();
+ }
+
+ @Override
+ public void register(ServiceStateChangeListener listener) {
+ service.register(listener);
+ }
+
+ @Override
+ public void unregister(ServiceStateChangeListener listener) {
+ service.unregister(listener);
+ }
+
+ @Override
+ public String getName() {
+ return service.getName();
+ }
+
+ @Override
+ public Configuration getConfig() {
+ return service.getConfig();
+ }
+
+ @Override
+ public STATE getServiceState() {
+ return service.getServiceState();
+ }
+
+ @Override
+ public long getStartTime() {
+ return startTime;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/Service.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/Service.java
new file mode 100644
index 0000000..722dd55
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/Service.java
@@ -0,0 +1,45 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.service;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Service LifeCycle.
+ */
+public interface Service {
+
+ public enum STATE {
+ NOTINITED,
+ INITED,
+ STARTED,
+ STOPPED;
+ }
+
+ void init(Configuration config);
+ void start();
+ void stop();
+ void register(ServiceStateChangeListener listener);
+ void unregister(ServiceStateChangeListener listener);
+
+ String getName();
+ Configuration getConfig();
+ STATE getServiceState();
+ long getStartTime();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/ServiceStateChangeListener.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/ServiceStateChangeListener.java
new file mode 100644
index 0000000..cf10a6c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/ServiceStateChangeListener.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.service;
+
+/**
+ * Interface to notify state changes of a service.
+ */
+public interface ServiceStateChangeListener {
+
+ void stateChanged(Service service);
+
+}
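
Listeners are registered on a service before its lifecycle runs; AbstractService's changeState invokes them synchronously on every transition. A one-class hedged sketch:

    // Hedged sketch: log every lifecycle transition of a service.
    import org.apache.hadoop.yarn.service.Service;
    import org.apache.hadoop.yarn.service.ServiceStateChangeListener;

    public class LoggingListener implements ServiceStateChangeListener {
      @Override
      public void stateChanged(Service service) {
        System.out.println(service.getName()
            + " -> " + service.getServiceState());
      }
      // Usage: service.register(new LoggingListener()); before init().
    }
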
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
new file mode 100644
index 0000000..aeef3a2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
@@ -0,0 +1,42 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.state;
+
+import org.apache.hadoop.yarn.YarnException;
+
+public class InvalidStateTransitonException extends YarnException {
+
+ private Enum<?> currentState;
+ private Enum<?> event;
+
+ public InvalidStateTransitonException(Enum<?> currentState, Enum<?> event) {
+ super("Invalid event: " + event + " at " + currentState);
+ this.currentState = currentState;
+ this.event = event;
+ }
+
+ public Enum<?> getCurrentState() {
+ return currentState;
+ }
+
+ public Enum<?> getEvent() {
+ return event;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultipleArcTransition.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultipleArcTransition.java
new file mode 100644
index 0000000..180aba0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultipleArcTransition.java
@@ -0,0 +1,41 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.state;
+
+/**
+ * Hook for a transition.
+ * The post state is decided by the transition hook and must be one of the
+ * valid post states registered in the StateMachine.
+ */
+public interface MultipleArcTransition
+ <OPERAND, EVENT, STATE extends Enum<STATE>> {
+
+ /**
+ * Transition hook.
+ * @return the postState. Post state must be one of the
+ * valid post states registered in StateMachine.
+ * @param operand the entity attached to the FSM, whose internal
+ * state may change.
+ * @param event causal event
+ */
+ public STATE transition(OPERAND operand, EVENT event);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/SingleArcTransition.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/SingleArcTransition.java
new file mode 100644
index 0000000..5298ce3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/SingleArcTransition.java
@@ -0,0 +1,36 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.state;
+
+/**
+ * Hook for a transition. This leads the state machine to move to
+ * the post state registered in the state machine.
+ */
+public interface SingleArcTransition<OPERAND, EVENT> {
+ /**
+ * Transition hook.
+ *
+ * @param operand the entity attached to the FSM, whose internal
+ * state may change.
+ * @param event causal event
+ */
+ public void transition(OPERAND operand, EVENT event);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachine.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachine.java
new file mode 100644
index 0000000..327538c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachine.java
@@ -0,0 +1,27 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.state;
+
+public interface StateMachine
+ <STATE extends Enum<STATE>,
+ EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
+ public STATE getCurrentState();
+ public STATE doTransition(EVENTTYPE eventType, EVENT event)
+ throws InvalidStateTransitonException;
+}
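
StateMachine instances come from the StateMachineFactory defined next: transitions are declared once on an immutable factory, then each operand gets its own machine. A hedged sketch of the intended wiring; the job/event enums are illustrative, and factory.make(this) is assumed from the part of StateMachineFactory beyond this excerpt:

    // Hedged sketch: a two-transition machine built with the factory below.
    import org.apache.hadoop.yarn.state.SingleArcTransition;
    import org.apache.hadoop.yarn.state.StateMachine;
    import org.apache.hadoop.yarn.state.StateMachineFactory;

    public class JobFsm {
      enum JobState { NEW, RUNNING, DONE }
      enum JobEvent { START, FINISH }

      private static final
          StateMachineFactory<JobFsm, JobState, JobEvent, JobEvent> factory =
        new StateMachineFactory<JobFsm, JobState, JobEvent, JobEvent>(JobState.NEW)
          .addTransition(JobState.NEW, JobState.RUNNING, JobEvent.START,
              new SingleArcTransition<JobFsm, JobEvent>() {
                @Override
                public void transition(JobFsm job, JobEvent event) {
                  System.out.println("start hook fired");
                }
              })
          .addTransition(JobState.RUNNING, JobState.DONE, JobEvent.FINISH)
          .installTopology(); // pre-build the table; no later synchronization

      // make(operand) is assumed from the untruncated StateMachineFactory.
      private final StateMachine<JobState, JobEvent, JobEvent> fsm =
          factory.make(this);

      public static void main(String[] args) throws Exception {
        JobFsm job = new JobFsm();
        job.fsm.doTransition(JobEvent.START, JobEvent.START);
        job.fsm.doTransition(JobEvent.FINISH, JobEvent.FINISH);
        // Any event not registered for the current state would throw
        // InvalidStateTransitonException.
        System.out.println("final state: " + job.fsm.getCurrentState());
      }
    }
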
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
new file mode 100644
index 0000000..2a5244d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
@@ -0,0 +1,444 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.state;
+
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
+
+/**
+ * State machine topology.
+ * This object is semantically immutable. If you have a
+ * StateMachineFactory there's no operation in the API that changes
+ * its semantic properties.
+ *
+ * @param <OPERAND> The object type on which this state machine operates.
+ * @param <STATE> The state of the entity.
+ * @param <EVENTTYPE> The external eventType to be handled.
+ * @param <EVENT> The event object.
+ *
+ */
+public final class StateMachineFactory
+ <OPERAND, STATE extends Enum<STATE>,
+ EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
+
+ private final TransitionsListNode transitionsListNode;
+
+ private Map<STATE, Map<EVENTTYPE,
+ Transition<OPERAND, STATE, EVENTTYPE, EVENT>>> stateMachineTable;
+
+ private STATE defaultInitialState;
+
+ private final boolean optimized;
+
+ /**
+ * Constructor
+ *
+ * This is the only constructor in the API.
+ *
+ */
+ public StateMachineFactory(STATE defaultInitialState) {
+ this.transitionsListNode = null;
+ this.defaultInitialState = defaultInitialState;
+ this.optimized = false;
+ this.stateMachineTable = null;
+ }
+
+ private StateMachineFactory
+ (StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> that,
+ ApplicableTransition t) {
+ this.defaultInitialState = that.defaultInitialState;
+ this.transitionsListNode
+ = new TransitionsListNode(t, that.transitionsListNode);
+ this.optimized = false;
+ this.stateMachineTable = null;
+ }
+
+ private StateMachineFactory
+ (StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> that,
+ boolean optimized) {
+ this.defaultInitialState = that.defaultInitialState;
+ this.transitionsListNode = that.transitionsListNode;
+ this.optimized = optimized;
+ if (optimized) {
+ makeStateMachineTable();
+ } else {
+ stateMachineTable = null;
+ }
+ }
+
+ private interface ApplicableTransition
+ <OPERAND, STATE extends Enum<STATE>,
+ EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
+ void apply(StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> subject);
+ }
+
+ private class TransitionsListNode {
+ final ApplicableTransition transition;
+ final TransitionsListNode next;
+
+ TransitionsListNode
+ (ApplicableTransition transition, TransitionsListNode next) {
+ this.transition = transition;
+ this.next = next;
+ }
+ }
+
+ static private class ApplicableSingleOrMultipleTransition
+ <OPERAND, STATE extends Enum<STATE>,
+ EVENTTYPE extends Enum<EVENTTYPE>, EVENT>
+ implements ApplicableTransition<OPERAND, STATE, EVENTTYPE, EVENT> {
+ final STATE preState;
+ final EVENTTYPE eventType;
+ final Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition;
+
+ ApplicableSingleOrMultipleTransition
+ (STATE preState, EVENTTYPE eventType,
+ Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition) {
+ this.preState = preState;
+ this.eventType = eventType;
+ this.transition = transition;
+ }
+
+ @Override
+ public void apply
+ (StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> subject) {
+ Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> transitionMap
+ = subject.stateMachineTable.get(preState);
+ if (transitionMap == null) {
+ // I use HashMap here because I would expect most EVENTTYPE's to not
+ // apply out of a particular state, so FSM sizes would be
+ // quadratic if I use EnumMap's here as I do at the top level.
+ transitionMap = new HashMap<EVENTTYPE,
+ Transition<OPERAND, STATE, EVENTTYPE, EVENT>>();
+ subject.stateMachineTable.put(preState, transitionMap);
+ }
+ transitionMap.put(eventType, transition);
+ }
+ }
+
+ /**
+ * @return a NEW StateMachineFactory just like {@code this} with the current
+ * transition added as a new legal transition. This overload
+ * has no hook object.
+ *
+ * Note that the returned StateMachineFactory is a distinct
+ * object.
+ *
+ * This method is part of the API.
+ *
+ * @param preState pre-transition state
+ * @param postState post-transition state
+ * @param eventType stimulus for the transition
+ */
+ public StateMachineFactory
+ <OPERAND, STATE, EVENTTYPE, EVENT>
+ addTransition(STATE preState, STATE postState, EVENTTYPE eventType) {
+ return addTransition(preState, postState, eventType, null);
+ }
+
+ /**
+ * @return a NEW StateMachineFactory just like {@code this} with the current
+ * transition added as a new legal transition. This overload
+ * has no hook object.
+ *
+ *
+ * Note that the returned StateMachineFactory is a distinct
+ * object.
+ *
+ * This method is part of the API.
+ *
+ * @param preState pre-transition state
+ * @param postState post-transition state
+ * @param eventTypes List of stimuli for the transitions
+ */
+ public StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> addTransition(
+ STATE preState, STATE postState, Set<EVENTTYPE> eventTypes) {
+ return addTransition(preState, postState, eventTypes, null);
+ }
+
+ /**
+ * @return a NEW StateMachineFactory just like {@code this} with the current
+ * transition added as a new legal transition
+ *
+ * Note that the returned StateMachineFactory is a distinct
+ * object.
+ *
+ * This method is part of the API.
+ *
+ * @param preState pre-transition state
+ * @param postState post-transition state
+ * @param eventTypes List of stimuli for the transitions
+ * @param hook transition hook
+ */
+ public StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> addTransition(
+ STATE preState, STATE postState, Set<EVENTTYPE> eventTypes,
+ SingleArcTransition<OPERAND, EVENT> hook) {
+ StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> factory = null;
+ for (EVENTTYPE event : eventTypes) {
+ if (factory == null) {
+ factory = addTransition(preState, postState, event, hook);
+ } else {
+ factory = factory.addTransition(preState, postState, event, hook);
+ }
+ }
+ return factory;
+ }
+
+ /**
+ * @return a NEW StateMachineFactory just like {@code this} with the current
+ * transition added as a new legal transition
+ *
+ * Note that the returned StateMachineFactory is a distinct object.
+ *
+ * This method is part of the API.
+ *
+ * @param preState pre-transition state
+ * @param postState post-transition state
+ * @param eventType stimulus for the transition
+ * @param hook transition hook
+ */
+ public StateMachineFactory
+ <OPERAND, STATE, EVENTTYPE, EVENT>
+ addTransition(STATE preState, STATE postState,
+ EVENTTYPE eventType,
+ SingleArcTransition<OPERAND, EVENT> hook){
+ return new StateMachineFactory
+ (this, new ApplicableSingleOrMultipleTransition
+ (preState, eventType, new SingleInternalArc(postState, hook)));
+ }
+
+ /**
+ * @return a NEW StateMachineFactory just like {@code this} with the current
+ * transition added as a new legal transition
+ *
+ * Note that the returned StateMachineFactory is a distinct object.
+ *
+ * This method is part of the API.
+ *
+ * @param preState pre-transition state
+ * @param postStates valid post-transition states
+ * @param eventType stimulus for the transition
+ * @param hook transition hook
+ */
+ public StateMachineFactory
+ <OPERAND, STATE, EVENTTYPE, EVENT>
+ addTransition(STATE preState, Set<STATE> postStates,
+ EVENTTYPE eventType,
+ MultipleArcTransition<OPERAND, EVENT, STATE> hook){
+ return new StateMachineFactory
+ (this,
+ new ApplicableSingleOrMultipleTransition
+ (preState, eventType, new MultipleInternalArc(postStates, hook)));
+ }
+
+ /**
+   * @return a StateMachineFactory just like {@code this}, except that the
+   *         state machine transition table is built eagerly, so no
+   *         synchronization is needed to build a state machine from it
+ *
+ * Note that the returned StateMachineFactory is a distinct object.
+ *
+ * This method is part of the API.
+ *
+ * The only way you could distinguish the returned
+ * StateMachineFactory from {@code this} would be by
+ * measuring the performance of the derived
+ * {@code StateMachine} you can get from it.
+ *
+   * Calling this is optional. It doesn't change the semantics of the factory:
+   * if you call it, there is no synchronization when you later use the factory.
+ */
+ public StateMachineFactory
+ <OPERAND, STATE, EVENTTYPE, EVENT>
+ installTopology() {
+ return new StateMachineFactory(this, true);
+ }
+
+ /**
+   * Effect a transition due to the given stimulus.
+   * @param operand the object upon which the state machine operates
+   * @param oldState current state
+   * @param eventType trigger to initiate the transition
+   * @param event causal event context
+   * @return transitioned state
+ */
+ private STATE doTransition
+ (OPERAND operand, STATE oldState, EVENTTYPE eventType, EVENT event)
+ throws InvalidStateTransitonException {
+    // We can assume that stateMachineTable is non-null because we call
+    // maybeMakeStateMachineTable() when we build an InternalStateMachine,
+    // and this code only gets called from inside a working InternalStateMachine.
+ Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> transitionMap
+ = stateMachineTable.get(oldState);
+ if (transitionMap != null) {
+ Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition
+ = transitionMap.get(eventType);
+ if (transition != null) {
+ return transition.doTransition(operand, oldState, event, eventType);
+ }
+ }
+ throw new InvalidStateTransitonException(oldState, eventType);
+ }
+
+ private synchronized void maybeMakeStateMachineTable() {
+ if (stateMachineTable == null) {
+ makeStateMachineTable();
+ }
+ }
+
+ private void makeStateMachineTable() {
+ Stack<ApplicableTransition> stack = new Stack<ApplicableTransition>();
+
+ Map<STATE, Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>>>
+ prototype = new HashMap<STATE, Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>>>();
+
+ prototype.put(defaultInitialState, null);
+
+    // Use an EnumMap here because it's faster and denser, and most of
+    // the states are expected to have at least one transition.
+ stateMachineTable
+ = new EnumMap<STATE, Map<EVENTTYPE,
+ Transition<OPERAND, STATE, EVENTTYPE, EVENT>>>(prototype);
+
+ for (TransitionsListNode cursor = transitionsListNode;
+ cursor != null;
+ cursor = cursor.next) {
+ stack.push(cursor.transition);
+ }
+
+ while (!stack.isEmpty()) {
+ stack.pop().apply(this);
+ }
+ }
+
+ private interface Transition<OPERAND, STATE extends Enum<STATE>,
+ EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
+ STATE doTransition(OPERAND operand, STATE oldState,
+ EVENT event, EVENTTYPE eventType);
+ }
+
+ private class SingleInternalArc
+ implements Transition<OPERAND, STATE, EVENTTYPE, EVENT> {
+
+ private STATE postState;
+ private SingleArcTransition<OPERAND, EVENT> hook; // transition hook
+
+ SingleInternalArc(STATE postState,
+ SingleArcTransition<OPERAND, EVENT> hook) {
+ this.postState = postState;
+ this.hook = hook;
+ }
+
+ @Override
+ public STATE doTransition(OPERAND operand, STATE oldState,
+ EVENT event, EVENTTYPE eventType) {
+ if (hook != null) {
+ hook.transition(operand, event);
+ }
+ return postState;
+ }
+ }
+
+ private class MultipleInternalArc
+ implements Transition<OPERAND, STATE, EVENTTYPE, EVENT>{
+
+ // Fields
+ private Set<STATE> validPostStates;
+ private MultipleArcTransition<OPERAND, EVENT, STATE> hook; // transition hook
+
+ MultipleInternalArc(Set<STATE> postStates,
+ MultipleArcTransition<OPERAND, EVENT, STATE> hook) {
+ this.validPostStates = postStates;
+ this.hook = hook;
+ }
+
+ @Override
+ public STATE doTransition(OPERAND operand, STATE oldState,
+ EVENT event, EVENTTYPE eventType)
+ throws InvalidStateTransitonException {
+ STATE postState = hook.transition(operand, event);
+
+ if (!validPostStates.contains(postState)) {
+ throw new InvalidStateTransitonException(oldState, eventType);
+ }
+ return postState;
+ }
+ }
+
+  /**
+   * @return a {@link StateMachine} that starts in
+   * {@code initialState} and whose {@link Transition}s are
+   * applied to {@code operand}.
+   *
+   * This is part of the API.
+   *
+   * @param operand the object upon which the returned
+   * {@link StateMachine} will operate.
+   * @param initialState the state in which the returned
+   * {@link StateMachine} will start.
+   */
+ public StateMachine<STATE, EVENTTYPE, EVENT>
+ make(OPERAND operand, STATE initialState) {
+ return new InternalStateMachine(operand, initialState);
+ }
+
+  /**
+   * @return a {@link StateMachine} that starts in the default initial
+   * state and whose {@link Transition}s are applied to
+   * {@code operand}.
+   *
+   * This is part of the API.
+   *
+   * @param operand the object upon which the returned
+   * {@link StateMachine} will operate.
+   */
+ public StateMachine<STATE, EVENTTYPE, EVENT> make(OPERAND operand) {
+ return new InternalStateMachine(operand, defaultInitialState);
+ }
+
+ private class InternalStateMachine
+ implements StateMachine<STATE, EVENTTYPE, EVENT> {
+ private final OPERAND operand;
+ private STATE currentState;
+
+ InternalStateMachine(OPERAND operand, STATE initialState) {
+ this.operand = operand;
+ this.currentState = initialState;
+ if (!optimized) {
+ maybeMakeStateMachineTable();
+ }
+ }
+
+ @Override
+ public synchronized STATE getCurrentState() {
+ return currentState;
+ }
+
+ @Override
+ public synchronized STATE doTransition(EVENTTYPE eventType, EVENT event)
+ throws InvalidStateTransitonException {
+ currentState = StateMachineFactory.this.doTransition
+ (operand, currentState, eventType, event);
+ return currentState;
+ }
+ }
+}
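A minimal usage sketch for the factory above (the Job, JobState, JobEventType and JobEvent names are hypothetical, and the public constructor taking a default initial state is assumed from the class's API): each addTransition call returns a new factory, and the optional installTopology() call precomputes the transition table.

    StateMachineFactory<Job, JobState, JobEventType, JobEvent> factory =
        new StateMachineFactory<Job, JobState, JobEventType, JobEvent>(JobState.NEW)
            .addTransition(JobState.NEW, JobState.RUNNING, JobEventType.JOB_START)
            .addTransition(JobState.RUNNING, JobState.SUCCEEDED,
                JobEventType.JOB_COMPLETE)
            .installTopology();  // optional: builds the transition table eagerly

    StateMachine<JobState, JobEventType, JobEvent> stateMachine = factory.make(job);
    stateMachine.doTransition(JobEventType.JOB_START, startEvent);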
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
new file mode 100644
index 0000000..3efd9da
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
@@ -0,0 +1,122 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.util;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+public abstract class AbstractLivelinessMonitor<O> extends AbstractService {
+
+ private static final Log LOG = LogFactory.getLog(AbstractLivelinessMonitor.class);
+
+  // Thread that runs periodically to check how long it has been since the
+  // last heartbeat was received.
+ private Thread checkerThread;
+ private volatile boolean stopped;
+  public static final int DEFAULT_EXPIRE = 5*60*1000; // 5 mins
+ private int expireInterval = DEFAULT_EXPIRE;
+ private int monitorInterval = expireInterval/3;
+
+ private final Clock clock;
+
+ private Map<O, Long> running = new HashMap<O, Long>();
+
+ public AbstractLivelinessMonitor(String name, Clock clock) {
+ super(name);
+ this.clock = clock;
+ }
+
+ @Override
+ public void start() {
+ checkerThread = new Thread(new PingChecker());
+ checkerThread.start();
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ stopped = true;
+ checkerThread.interrupt();
+ super.stop();
+ }
+
+ protected abstract void expire(O ob);
+
+ protected void setExpireInterval(int expireInterval) {
+ this.expireInterval = expireInterval;
+ }
+
+ protected void setMonitorInterval(int monitorInterval) {
+ this.monitorInterval = monitorInterval;
+ }
+
+ public synchronized void receivedPing(O ob) {
+    // Only update the timestamp for objects that are still registered.
+ if (running.containsKey(ob)) {
+ running.put(ob, clock.getTime());
+ }
+ }
+
+ public synchronized void register(O ob) {
+ running.put(ob, clock.getTime());
+ }
+
+ public synchronized void unregister(O ob) {
+ running.remove(ob);
+ }
+
+ private class PingChecker implements Runnable {
+
+ @Override
+ public void run() {
+ while (!stopped && !Thread.currentThread().isInterrupted()) {
+ synchronized (AbstractLivelinessMonitor.this) {
+ Iterator<Map.Entry<O, Long>> iterator =
+ running.entrySet().iterator();
+
+          // Avoid recalculating the current time on every loop iteration.
+ long currentTime = clock.getTime();
+
+ while (iterator.hasNext()) {
+ Map.Entry<O, Long> entry = iterator.next();
+ if (currentTime > entry.getValue() + expireInterval) {
+ iterator.remove();
+ expire(entry.getKey());
+              LOG.info("Expired: " + entry.getKey() +
+                  ". Timed out after " + expireInterval/1000 + " secs");
+ }
+ }
+ }
+ try {
+ Thread.sleep(monitorInterval);
+ } catch (InterruptedException e) {
+ LOG.info(getName() + " thread interrupted");
+ break;
+ }
+ }
+ }
+ }
+
+}
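A minimal subclass sketch for the monitor above (TaskAttemptId and the expiry reaction are hypothetical): register an object when it starts, ping it on every heartbeat, and expire() fires once the expire interval elapses with no ping.

    class TaskHeartbeatMonitor extends AbstractLivelinessMonitor<TaskAttemptId> {
      TaskHeartbeatMonitor(Clock clock) {
        super("TaskHeartbeatMonitor", clock);
        setExpireInterval(10 * 60 * 1000); // time out after 10 minutes of silence
        setMonitorInterval(60 * 1000);     // scan for expired objects once a minute
      }
      @Override
      protected void expire(TaskAttemptId id) {
        // hypothetical reaction: mark the attempt failed and reschedule it
      }
    }

    // Callers: register(id) at launch, receivedPing(id) on each heartbeat,
    // unregister(id) on normal completion.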
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
new file mode 100644
index 0000000..fcb3f95
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
@@ -0,0 +1,66 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.util;
+
+import java.util.Iterator;
+
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+
+/**
+ * YARN application-related utilities.
+ */
+public class Apps {
+ public static final String APP = "app";
+ public static final String ID = "ID";
+
+ public static String toString(ApplicationId id) {
+    return _join(APP, id.getClusterTimestamp(), id.getId());
+ }
+
+ public static ApplicationId toAppID(String aid) {
+ Iterator<String> it = _split(aid).iterator();
+ return toAppID(APP, aid, it);
+ }
+
+ public static ApplicationId toAppID(String prefix, String s, Iterator<String> it) {
+ if (!it.hasNext() || !it.next().equals(prefix)) {
+ throwParseException(sjoin(prefix, ID), s);
+ }
+ shouldHaveNext(prefix, s, it);
+ ApplicationId appId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
+ appId.setClusterTimestamp(Long.parseLong(it.next()));
+ shouldHaveNext(prefix, s, it);
+ appId.setId(Integer.parseInt(it.next()));
+ return appId;
+ }
+
+ public static void shouldHaveNext(String prefix, String s, Iterator<String> it) {
+ if (!it.hasNext()) {
+ throwParseException(sjoin(prefix, ID), s);
+ }
+ }
+
+ public static void throwParseException(String name, String s) {
+ throw new YarnException(join("Error parsing ", name, ": ", s));
+ }
+}
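For reference, the round trip implemented above, assuming StringHelper's _join and _split use "_" as the token separator (hypothetical id values):

    ApplicationId id = RecordFactoryProvider.getRecordFactory(null)
        .newRecordInstance(ApplicationId.class);
    id.setClusterTimestamp(1315986584000L);
    id.setId(7);
    String s = Apps.toString(id);            // e.g. "app_1315986584000_7"
    ApplicationId parsed = Apps.toAppID(s);  // recovers the same timestamp and id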
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
new file mode 100644
index 0000000..be53abc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
@@ -0,0 +1,228 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.util;
+
+import java.net.URI;
+import java.util.Comparator;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+/**
+ * Builder utilities to construct various objects.
+ */
+public class BuilderUtils {
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ public static class ApplicationIdComparator implements
+ Comparator<ApplicationId> {
+ @Override
+ public int compare(ApplicationId a1, ApplicationId a2) {
+ return a1.compareTo(a2);
+ }
+ }
+
+  public static class ContainerIdComparator implements
+      Comparator<ContainerId> {
+
+ @Override
+ public int compare(ContainerId c1,
+ ContainerId c2) {
+ return c1.compareTo(c2);
+ }
+ }
+
+  public static class ResourceRequestComparator
+      implements Comparator<ResourceRequest> {
+    @Override
+    public int compare(ResourceRequest r1, ResourceRequest r2) {
+
+ // Compare priority, host and capability
+ int ret = r1.getPriority().compareTo(r2.getPriority());
+ if (ret == 0) {
+ String h1 = r1.getHostName();
+ String h2 = r2.getHostName();
+ ret = h1.compareTo(h2);
+ }
+ if (ret == 0) {
+ ret = r1.getCapability().compareTo(r2.getCapability());
+ }
+ return ret;
+ }
+ }
+
+ public static LocalResource newLocalResource(RecordFactory recordFactory,
+ URI uri, LocalResourceType type, LocalResourceVisibility visibility,
+ long size, long timestamp) {
+ LocalResource resource =
+ recordFactory.newRecordInstance(LocalResource.class);
+ resource.setResource(ConverterUtils.getYarnUrlFromURI(uri));
+ resource.setType(type);
+ resource.setVisibility(visibility);
+ resource.setSize(size);
+ resource.setTimestamp(timestamp);
+ return resource;
+ }
+
+ public static ApplicationId newApplicationId(RecordFactory recordFactory,
+ long clustertimestamp, CharSequence id) {
+ ApplicationId applicationId =
+ recordFactory.newRecordInstance(ApplicationId.class);
+ applicationId.setId(Integer.valueOf(id.toString()));
+ applicationId.setClusterTimestamp(clustertimestamp);
+ return applicationId;
+ }
+
+ public static ApplicationId newApplicationId(RecordFactory recordFactory,
+ long clusterTimeStamp, int id) {
+ ApplicationId applicationId =
+ recordFactory.newRecordInstance(ApplicationId.class);
+ applicationId.setId(id);
+ applicationId.setClusterTimestamp(clusterTimeStamp);
+ return applicationId;
+ }
+
+ public static ApplicationId newApplicationId(long clusterTimeStamp, int id) {
+ ApplicationId applicationId =
+ recordFactory.newRecordInstance(ApplicationId.class);
+ applicationId.setId(id);
+ applicationId.setClusterTimestamp(clusterTimeStamp);
+ return applicationId;
+ }
+
+ public static ApplicationId convert(long clustertimestamp, CharSequence id) {
+ ApplicationId applicationId =
+ recordFactory.newRecordInstance(ApplicationId.class);
+ applicationId.setId(Integer.valueOf(id.toString()));
+ applicationId.setClusterTimestamp(clustertimestamp);
+ return applicationId;
+ }
+
+ public static ContainerId newContainerId(RecordFactory recordFactory,
+ ApplicationId appId,
+ int containerId) {
+ ContainerId id = recordFactory.newRecordInstance(ContainerId.class);
+ id.setAppId(appId);
+ id.setId(containerId);
+ return id;
+ }
+
+ public static ContainerId newContainerId(RecordFactory recordFactory,
+ ApplicationAttemptId appAttemptId,
+ int containerId) {
+ ContainerId id = recordFactory.newRecordInstance(ContainerId.class);
+ id.setAppAttemptId(appAttemptId);
+ id.setAppId(appAttemptId.getApplicationId());
+ id.setId(containerId);
+ return id;
+ }
+
+ public static Container clone(Container c) {
+ Container container = recordFactory.newRecordInstance(Container.class);
+ container.setId(c.getId());
+ container.setContainerToken(c.getContainerToken());
+ container.setNodeId(c.getNodeId());
+ container.setNodeHttpAddress(c.getNodeHttpAddress());
+ container.setResource(c.getResource());
+ container.setState(c.getState());
+ return container;
+ }
+
+ public static Container newContainer(RecordFactory recordFactory,
+ ApplicationAttemptId appAttemptId, int containerId, NodeId nodeId,
+ String nodeHttpAddress, Resource resource) {
+ ContainerId containerID =
+ newContainerId(recordFactory, appAttemptId, containerId);
+ return newContainer(containerID, nodeId, nodeHttpAddress, resource);
+ }
+
+ public static Container newContainer(ContainerId containerId,
+ NodeId nodeId, String nodeHttpAddress, Resource resource) {
+ Container container = recordFactory.newRecordInstance(Container.class);
+ container.setId(containerId);
+ container.setNodeId(nodeId);
+ container.setNodeHttpAddress(nodeHttpAddress);
+ container.setResource(resource);
+ container.setState(ContainerState.NEW);
+ ContainerStatus containerStatus = Records.newRecord(ContainerStatus.class);
+ containerStatus.setContainerId(containerId);
+ containerStatus.setState(ContainerState.NEW);
+ container.setContainerStatus(containerStatus);
+ return container;
+ }
+
+ public static ResourceRequest newResourceRequest(Priority priority,
+ String hostName, Resource capability, int numContainers) {
+ ResourceRequest request = recordFactory
+ .newRecordInstance(ResourceRequest.class);
+ request.setPriority(priority);
+ request.setHostName(hostName);
+ request.setCapability(capability);
+ request.setNumContainers(numContainers);
+ return request;
+ }
+
+ public static ResourceRequest newResourceRequest(ResourceRequest r) {
+ ResourceRequest request = recordFactory
+ .newRecordInstance(ResourceRequest.class);
+ request.setPriority(r.getPriority());
+ request.setHostName(r.getHostName());
+ request.setCapability(r.getCapability());
+ request.setNumContainers(r.getNumContainers());
+ return request;
+ }
+
+ public static ApplicationReport newApplicationReport(
+ ApplicationId applicationId, String user, String queue, String name,
+ String host, int rpcPort, String clientToken, ApplicationState state,
+ String diagnostics, String url) {
+ ApplicationReport report = recordFactory
+ .newRecordInstance(ApplicationReport.class);
+ report.setApplicationId(applicationId);
+ report.setUser(user);
+ report.setQueue(queue);
+ report.setName(name);
+ report.setHost(host);
+ report.setRpcPort(rpcPort);
+ report.setClientToken(clientToken);
+ report.setState(state);
+ report.setDiagnostics(diagnostics);
+ report.setTrackingUrl(url);
+ return report;
+ }
+}
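A short sketch of how these builders combine (hypothetical values; the Priority and Resource setters are assumed from the records API used elsewhere in this patch, and "*" is assumed to stand for any host):

    ApplicationId appId = BuilderUtils.newApplicationId(1315986584000L, 1);
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(0);
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(1024);                       // MB
    ResourceRequest request = BuilderUtils.newResourceRequest(
        priority, "*", capability, 3);                // 3 containers, any host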
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
new file mode 100644
index 0000000..5e80f41
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
@@ -0,0 +1,164 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.util;
+
+import static org.apache.hadoop.yarn.util.StringHelper._split;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.text.NumberFormat;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+
+/**
+ * This class contains utilities that help convert data structures between
+ * their serializable format and native Hadoop/Java data structures.
+ *
+ */
+public class ConverterUtils {
+
+ public static final String APPLICATION_PREFIX = "application";
+
+ /**
+   * Return a Hadoop {@link Path} from a given url.
+   *
+   * @param url url to convert
+   * @return path corresponding to the given url
+   * @throws URISyntaxException if the url cannot form a valid URI
+ */
+ public static Path getPathFromYarnURL(URL url) throws URISyntaxException {
+ String scheme = url.getScheme() == null ? "" : url.getScheme();
+ String authority = url.getHost() != null ? url.getHost() + ":" + url.getPort()
+ : "";
+ return new Path(
+ (new URI(scheme, authority, url.getFile(), null, null)).normalize());
+ }
+
+ /**
+   * Convert a map with CharSequence keys and values into an equivalent
+   * map of Strings.
+   * @param env the map to convert
+   * @return the converted map
+ */
+ public static Map<String, String> convertToString(
+ Map<CharSequence, CharSequence> env) {
+
+ Map<String, String> stringMap = new HashMap<String, String>();
+ for (Entry<CharSequence, CharSequence> entry: env.entrySet()) {
+ stringMap.put(entry.getKey().toString(), entry.getValue().toString());
+ }
+ return stringMap;
+ }
+
+ public static URL getYarnUrlFromPath(Path path) {
+ return getYarnUrlFromURI(path.toUri());
+ }
+
+ public static URL getYarnUrlFromURI(URI uri) {
+ URL url = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(URL.class);
+ if (uri.getHost() != null) {
+ url.setHost(uri.getHost());
+ }
+ url.setPort(uri.getPort());
+ url.setScheme(uri.getScheme());
+ url.setFile(uri.getPath());
+ return url;
+ }
+
+  // ThreadLocal is used because NumberFormat instances are not thread-safe.
+ private static final ThreadLocal<NumberFormat> appIdFormat =
+ new ThreadLocal<NumberFormat>() {
+ @Override
+ public NumberFormat initialValue() {
+ NumberFormat fmt = NumberFormat.getInstance();
+ fmt.setGroupingUsed(false);
+ fmt.setMinimumIntegerDigits(4);
+ return fmt;
+ }
+ };
+
+  // ThreadLocal is used because NumberFormat instances are not thread-safe.
+ private static final ThreadLocal<NumberFormat> containerIdFormat =
+ new ThreadLocal<NumberFormat>() {
+ @Override
+ public NumberFormat initialValue() {
+ NumberFormat fmt = NumberFormat.getInstance();
+ fmt.setGroupingUsed(false);
+ fmt.setMinimumIntegerDigits(6);
+ return fmt;
+ }
+ };
+
+ public static String toString(ApplicationId appId) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(APPLICATION_PREFIX + "_").append(appId.getClusterTimestamp())
+ .append("_");
+ sb.append(appIdFormat.get().format(appId.getId()));
+ return sb.toString();
+ }
+
+ public static ApplicationId toApplicationId(RecordFactory recordFactory,
+ String appIdStr) {
+ Iterator<String> it = _split(appIdStr).iterator();
+ it.next(); // prefix. TODO: Validate application prefix
+ return toApplicationId(recordFactory, it);
+ }
+
+ private static ApplicationId toApplicationId(RecordFactory recordFactory,
+ Iterator<String> it) {
+ ApplicationId appId =
+ recordFactory.newRecordInstance(ApplicationId.class);
+ appId.setClusterTimestamp(Long.parseLong(it.next()));
+ appId.setId(Integer.parseInt(it.next()));
+ return appId;
+ }
+
+ public static String toString(ContainerId cId) {
+ StringBuilder sb = new StringBuilder();
+ ApplicationId appId = cId.getAppId();
+ sb.append("container_").append(appId.getClusterTimestamp()).append("_");
+ sb.append(appIdFormat.get().format(appId.getId())).append("_");
+ sb.append(containerIdFormat.get().format(cId.getId()));
+ return sb.toString();
+ }
+
+ public static ContainerId toContainerId(RecordFactory recordFactory,
+ String containerIdStr) {
+ Iterator<String> it = _split(containerIdStr).iterator();
+ it.next(); // prefix. TODO: Validate container prefix
+ ApplicationId appID = toApplicationId(recordFactory, it);
+ ContainerId containerId =
+ recordFactory.newRecordInstance(ContainerId.class);
+ containerId.setAppId(appID);
+ containerId.setId(Integer.parseInt(it.next()));
+ return containerId;
+ }
+}
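For reference, the string forms the formatters above produce (hypothetical ids; application ids are zero-padded to 4 digits and container ids to 6, per the NumberFormat settings):

    ApplicationId (clusterTimestamp=1315986584000, id=42):
        ConverterUtils.toString(appId)       -> "application_1315986584000_0042"
    ContainerId 7 of that application:
        ConverterUtils.toString(containerId) -> "container_1315986584000_0042_000007"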
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
new file mode 100644
index 0000000..aa69780
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
@@ -0,0 +1,412 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Plugin to calculate resource information on Linux systems.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+ private static final Log LOG =
+ LogFactory.getLog(LinuxResourceCalculatorPlugin.class);
+
+ public static final int UNAVAILABLE = -1;
+
+ /**
+   * The /proc/meminfo virtual file has key-value pairs in the format
+ * "key:[ \t]*value[ \t]kB".
+ */
+ private static final String PROCFS_MEMFILE = "/proc/meminfo";
+ private static final Pattern PROCFS_MEMFILE_FORMAT =
+ Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");
+
+ // We need the values for the following keys in meminfo
+ private static final String MEMTOTAL_STRING = "MemTotal";
+ private static final String SWAPTOTAL_STRING = "SwapTotal";
+ private static final String MEMFREE_STRING = "MemFree";
+ private static final String SWAPFREE_STRING = "SwapFree";
+ private static final String INACTIVE_STRING = "Inactive";
+
+ /**
+ * Patterns for parsing /proc/cpuinfo
+ */
+ private static final String PROCFS_CPUINFO = "/proc/cpuinfo";
+ private static final Pattern PROCESSOR_FORMAT =
+ Pattern.compile("^processor[ \t]:[ \t]*([0-9]*)");
+ private static final Pattern FREQUENCY_FORMAT =
+ Pattern.compile("^cpu MHz[ \t]*:[ \t]*([0-9.]*)");
+
+ /**
+ * Pattern for parsing /proc/stat
+ */
+ private static final String PROCFS_STAT = "/proc/stat";
+ private static final Pattern CPU_TIME_FORMAT =
+ Pattern.compile("^cpu[ \t]*([0-9]*)" +
+ "[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*");
+
+ private String procfsMemFile;
+ private String procfsCpuFile;
+ private String procfsStatFile;
+ long jiffyLengthInMillis;
+
+ private long ramSize = 0;
+ private long swapSize = 0;
+ private long ramSizeFree = 0; // free ram space on the machine (kB)
+ private long swapSizeFree = 0; // free swap space on the machine (kB)
+ private long inactiveSize = 0; // inactive cache memory (kB)
+ private int numProcessors = 0; // number of processors on the system
+ private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
+ private long cumulativeCpuTime = 0L; // CPU used time since system is on (ms)
+ private long lastCumulativeCpuTime = 0L; // CPU used time read last time (ms)
+  private float cpuUsage = UNAVAILABLE;
+  // Unix timestamps of the current and previous readings of CPU time (ms)
+  private long sampleTime = UNAVAILABLE;
+  private long lastSampleTime = UNAVAILABLE;
+ private ProcfsBasedProcessTree pTree = null;
+
+ boolean readMemInfoFile = false;
+ boolean readCpuInfoFile = false;
+
+ /**
+   * Get current time.
+   * @return Unix time stamp in milliseconds
+ */
+ long getCurrentTime() {
+ return System.currentTimeMillis();
+ }
+
+ public LinuxResourceCalculatorPlugin() {
+ procfsMemFile = PROCFS_MEMFILE;
+ procfsCpuFile = PROCFS_CPUINFO;
+ procfsStatFile = PROCFS_STAT;
+ jiffyLengthInMillis = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS;
+ String pid = System.getenv().get("JVM_PID");
+ pTree = new ProcfsBasedProcessTree(pid);
+ }
+
+ /**
+ * Constructor which allows assigning the /proc/ directories. This will be
+   * used only in unit tests.
+ * @param procfsMemFile fake file for /proc/meminfo
+ * @param procfsCpuFile fake file for /proc/cpuinfo
+ * @param procfsStatFile fake file for /proc/stat
+ * @param jiffyLengthInMillis fake jiffy length value
+ */
+ public LinuxResourceCalculatorPlugin(String procfsMemFile,
+ String procfsCpuFile,
+ String procfsStatFile,
+ long jiffyLengthInMillis) {
+ this.procfsMemFile = procfsMemFile;
+ this.procfsCpuFile = procfsCpuFile;
+ this.procfsStatFile = procfsStatFile;
+ this.jiffyLengthInMillis = jiffyLengthInMillis;
+ String pid = System.getenv().get("JVM_PID");
+ pTree = new ProcfsBasedProcessTree(pid);
+ }
+
+ /**
+ * Read /proc/meminfo, parse and compute memory information only once
+ */
+ private void readProcMemInfoFile() {
+ readProcMemInfoFile(false);
+ }
+
+ /**
+ * Read /proc/meminfo, parse and compute memory information
+ * @param readAgain if false, read only on the first time
+ */
+ private void readProcMemInfoFile(boolean readAgain) {
+
+ if (readMemInfoFile && !readAgain) {
+ return;
+ }
+
+    // Read "/proc/meminfo" file
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader = new FileReader(procfsMemFile);
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+
+ Matcher mat = null;
+
+ try {
+ String str = in.readLine();
+ while (str != null) {
+ mat = PROCFS_MEMFILE_FORMAT.matcher(str);
+ if (mat.find()) {
+ if (mat.group(1).equals(MEMTOTAL_STRING)) {
+ ramSize = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
+ swapSize = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(MEMFREE_STRING)) {
+ ramSizeFree = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(SWAPFREE_STRING)) {
+ swapSizeFree = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(INACTIVE_STRING)) {
+ inactiveSize = Long.parseLong(mat.group(2));
+ }
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+
+ readMemInfoFile = true;
+ }
+
+ /**
+ * Read /proc/cpuinfo, parse and calculate CPU information
+ */
+ private void readProcCpuInfoFile() {
+    // This file needs to be read only once
+ if (readCpuInfoFile) {
+ return;
+ }
+ // Read "/proc/cpuinfo" file
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader = new FileReader(procfsCpuFile);
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+ Matcher mat = null;
+ try {
+ numProcessors = 0;
+ String str = in.readLine();
+ while (str != null) {
+ mat = PROCESSOR_FORMAT.matcher(str);
+ if (mat.find()) {
+ numProcessors++;
+ }
+ mat = FREQUENCY_FORMAT.matcher(str);
+ if (mat.find()) {
+ cpuFrequency = (long)(Double.parseDouble(mat.group(1)) * 1000); // kHz
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+ readCpuInfoFile = true;
+ }
+
+ /**
+ * Read /proc/stat file, parse and calculate cumulative CPU
+ */
+ private void readProcStatFile() {
+ // Read "/proc/stat" file
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader = new FileReader(procfsStatFile);
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+
+ Matcher mat = null;
+ try {
+ String str = in.readLine();
+ while (str != null) {
+ mat = CPU_TIME_FORMAT.matcher(str);
+ if (mat.find()) {
+ long uTime = Long.parseLong(mat.group(1));
+ long nTime = Long.parseLong(mat.group(2));
+ long sTime = Long.parseLong(mat.group(3));
+          cumulativeCpuTime = uTime + nTime + sTime; // in jiffies; converted to ms below
+ break;
+ }
+ str = in.readLine();
+ }
+ cumulativeCpuTime *= jiffyLengthInMillis;
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getPhysicalMemorySize() {
+ readProcMemInfoFile();
+ return ramSize * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getVirtualMemorySize() {
+ readProcMemInfoFile();
+ return (ramSize + swapSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailablePhysicalMemorySize() {
+ readProcMemInfoFile(true);
+ return (ramSizeFree + inactiveSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailableVirtualMemorySize() {
+ readProcMemInfoFile(true);
+ return (ramSizeFree + swapSizeFree + inactiveSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getNumProcessors() {
+ readProcCpuInfoFile();
+ return numProcessors;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCpuFrequency() {
+ readProcCpuInfoFile();
+ return cpuFrequency;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCumulativeCpuTime() {
+ readProcStatFile();
+ return cumulativeCpuTime;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public float getCpuUsage() {
+ readProcStatFile();
+ sampleTime = getCurrentTime();
+ if (lastSampleTime == UNAVAILABLE ||
+ lastSampleTime > sampleTime) {
+ // lastSampleTime > sampleTime may happen when the system time is changed
+ lastSampleTime = sampleTime;
+ lastCumulativeCpuTime = cumulativeCpuTime;
+ return cpuUsage;
+ }
+ // When lastSampleTime is sufficiently old, update cpuUsage.
+ // Also take a sample of the current time and cumulative CPU time for the
+ // use of the next calculation.
+ final long MINIMUM_UPDATE_INTERVAL = 10 * jiffyLengthInMillis;
+ if (sampleTime > lastSampleTime + MINIMUM_UPDATE_INTERVAL) {
+ cpuUsage = (float)(cumulativeCpuTime - lastCumulativeCpuTime) * 100F /
+ ((float)(sampleTime - lastSampleTime) * getNumProcessors());
+ lastSampleTime = sampleTime;
+ lastCumulativeCpuTime = cumulativeCpuTime;
+ }
+ return cpuUsage;
+ }
+
+ /**
+ * Test the {@link LinuxResourceCalculatorPlugin}
+ *
+ * @param args
+ */
+ public static void main(String[] args) {
+ LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
+ System.out.println("Physical memory Size (bytes) : "
+ + plugin.getPhysicalMemorySize());
+ System.out.println("Total Virtual memory Size (bytes) : "
+ + plugin.getVirtualMemorySize());
+ System.out.println("Available Physical memory Size (bytes) : "
+ + plugin.getAvailablePhysicalMemorySize());
+ System.out.println("Total Available Virtual memory Size (bytes) : "
+ + plugin.getAvailableVirtualMemorySize());
+ System.out.println("Number of Processors : " + plugin.getNumProcessors());
+ System.out.println("CPU frequency (kHz) : " + plugin.getCpuFrequency());
+ System.out.println("Cumulative CPU time (ms) : " +
+ plugin.getCumulativeCpuTime());
+ try {
+ // Sleep so we can compute the CPU usage
+ Thread.sleep(500L);
+ } catch (InterruptedException e) {
+ // do nothing
+ }
+ System.out.println("CPU usage % : " + plugin.getCpuUsage());
+ }
+
+ @Override
+ public ProcResourceValues getProcResourceValues() {
+ pTree = pTree.getProcessTree();
+ long cpuTime = pTree.getCumulativeCpuTime();
+ long pMem = pTree.getCumulativeRssmem();
+ long vMem = pTree.getCumulativeVmem();
+ return new ProcResourceValues(cpuTime, pMem, vMem);
+ }
+}
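To make the sampling in getCpuUsage() concrete (hypothetical numbers): usage is the growth in cumulative CPU time, summed over all processors, as a percentage of elapsed wall-clock time per processor. For two samples taken 1000 ms apart on a 4-processor machine where cumulativeCpuTime grew by 2000 ms:

    cpuUsage = 2000 * 100 / (1000 * 4) = 50.0   // percent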
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
new file mode 100644
index 0000000..a934f0c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -0,0 +1,631 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * A Proc file-system based ProcessTree. Works only on Linux.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class ProcfsBasedProcessTree {
+
+ static final Log LOG = LogFactory
+ .getLog(ProcfsBasedProcessTree.class);
+
+ private static final String PROCFS = "/proc/";
+
+  private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern.compile(
+ "^([0-9-]+)\\s([^\\s]+)\\s[^\\s]\\s([0-9-]+)\\s([0-9-]+)\\s([0-9-]+)\\s" +
+ "([0-9-]+\\s){7}([0-9]+)\\s([0-9]+)\\s([0-9-]+\\s){7}([0-9]+)\\s([0-9]+)" +
+ "(\\s[0-9-]+){15}");
+
+ public static final String PROCFS_STAT_FILE = "stat";
+ public static final String PROCFS_CMDLINE_FILE = "cmdline";
+ public static final long PAGE_SIZE;
+ static {
+ ShellCommandExecutor shellExecutor =
+ new ShellCommandExecutor(new String[]{"getconf", "PAGESIZE"});
+ long pageSize = -1;
+ try {
+ shellExecutor.execute();
+ pageSize = Long.parseLong(shellExecutor.getOutput().replace("\n", ""));
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ } finally {
+ PAGE_SIZE = pageSize;
+ }
+ }
+  public static final long JIFFY_LENGTH_IN_MILLIS; // in milliseconds
+ static {
+ ShellCommandExecutor shellExecutor =
+ new ShellCommandExecutor(new String[]{"getconf", "CLK_TCK"});
+ long jiffiesPerSecond = -1;
+ try {
+ shellExecutor.execute();
+ jiffiesPerSecond = Long.parseLong(shellExecutor.getOutput().replace("\n", ""));
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ } finally {
+ JIFFY_LENGTH_IN_MILLIS = jiffiesPerSecond != -1 ?
+ Math.round(1000D / jiffiesPerSecond) : -1;
+ }
+ }
+
+  // To enable testing, this variable can be configured to point to a
+  // test directory.
+ private String procfsDir;
+
+ protected final Integer pid;
+ private Long cpuTime = 0L;
+ private boolean setsidUsed = false;
+
+ protected Map<Integer, ProcessInfo> processTree =
+ new HashMap<Integer, ProcessInfo>();
+
+ public ProcfsBasedProcessTree(String pid) {
+ this(pid, false);
+ }
+
+ public ProcfsBasedProcessTree(String pid, boolean setsidUsed) {
+ this(pid, setsidUsed, PROCFS);
+ }
+
+ /**
+ * Build a new process tree rooted at the pid.
+ *
+ * This method is provided mainly for testing purposes, where
+ * the root of the proc file system can be adjusted.
+ *
+ * @param pid root of the process tree
+ * @param setsidUsed true, if setsid was used for the root pid
+ * @param procfsDir the root of a proc file system - only used for testing.
+ */
+ public ProcfsBasedProcessTree(String pid, boolean setsidUsed,
+ String procfsDir) {
+ this.pid = getValidPID(pid);
+ this.setsidUsed = setsidUsed;
+ this.procfsDir = procfsDir;
+ }
+
+ /**
+ * Checks if the ProcfsBasedProcessTree is available on this system.
+ *
+ * @return true if ProcfsBasedProcessTree is available. False otherwise.
+ */
+ public static boolean isAvailable() {
+ try {
+ String osName = System.getProperty("os.name");
+ if (!osName.startsWith("Linux")) {
+ LOG.info("ProcfsBasedProcessTree currently is supported only on "
+ + "Linux.");
+ return false;
+ }
+ } catch (SecurityException se) {
+ LOG.warn("Failed to get Operating System name. " + se);
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Get the process-tree with latest state. If the root-process is not alive,
+ * an empty tree will be returned.
+ *
+ * @return the process-tree with latest state.
+ */
+ public ProcfsBasedProcessTree getProcessTree() {
+ if (pid != -1) {
+ // Get the list of processes
+ List<Integer> processList = getProcessList();
+
+ Map<Integer, ProcessInfo> allProcessInfo = new HashMap<Integer, ProcessInfo>();
+
+ // cache the processTree to get the age for processes
+ Map<Integer, ProcessInfo> oldProcs =
+ new HashMap<Integer, ProcessInfo>(processTree);
+ processTree.clear();
+
+ ProcessInfo me = null;
+ for (Integer proc : processList) {
+ // Get information for each process
+ ProcessInfo pInfo = new ProcessInfo(proc);
+ if (constructProcessInfo(pInfo, procfsDir) != null) {
+ allProcessInfo.put(proc, pInfo);
+ if (proc.equals(this.pid)) {
+ me = pInfo; // cache 'me'
+ processTree.put(proc, pInfo);
+ }
+ }
+ }
+
+ if (me == null) {
+ return this;
+ }
+
+ // Add each process to its parent.
+ for (Map.Entry<Integer, ProcessInfo> entry : allProcessInfo.entrySet()) {
+ Integer pID = entry.getKey();
+ if (pID != 1) {
+ ProcessInfo pInfo = entry.getValue();
+ ProcessInfo parentPInfo = allProcessInfo.get(pInfo.getPpid());
+ if (parentPInfo != null) {
+ parentPInfo.addChild(pInfo);
+ }
+ }
+ }
+
+ // now start constructing the process-tree
+ LinkedList<ProcessInfo> pInfoQueue = new LinkedList<ProcessInfo>();
+ pInfoQueue.addAll(me.getChildren());
+ while (!pInfoQueue.isEmpty()) {
+ ProcessInfo pInfo = pInfoQueue.remove();
+ if (!processTree.containsKey(pInfo.getPid())) {
+ processTree.put(pInfo.getPid(), pInfo);
+ }
+ pInfoQueue.addAll(pInfo.getChildren());
+ }
+
+ // update age values and compute the number of jiffies since last update
+ for (Map.Entry<Integer, ProcessInfo> procs : processTree.entrySet()) {
+ ProcessInfo oldInfo = oldProcs.get(procs.getKey());
+ if (procs.getValue() != null) {
+ procs.getValue().updateJiffy(oldInfo);
+ if (oldInfo != null) {
+ procs.getValue().updateAge(oldInfo);
+ }
+ }
+ }
+
+ if (LOG.isDebugEnabled()) {
+ // Log.debug the ProcfsBasedProcessTree
+ LOG.debug(this.toString());
+ }
+ }
+ return this;
+ }
+
+  /** Verify that the process id of this tree's root is the same as its
+   * process group id.
+   * @return true if the pid matches its process group id, or if the
+   *         process has already finished
+   */
+ public boolean checkPidPgrpidForMatch() {
+ return checkPidPgrpidForMatch(pid, PROCFS);
+ }
+
+ public static boolean checkPidPgrpidForMatch(int _pid, String procfs) {
+ // Get information for this process
+ ProcessInfo pInfo = new ProcessInfo(_pid);
+ pInfo = constructProcessInfo(pInfo, procfs);
+ // null if process group leader finished execution; issue no warning
+ // make sure that pid and its pgrpId match
+ return pInfo == null || pInfo.getPgrpId().equals(_pid);
+ }
+
+ private static final String PROCESSTREE_DUMP_FORMAT =
+ "\t|- %d %d %d %d %s %d %d %d %d %s\n";
+
+ public List<Integer> getCurrentProcessIDs() {
+ List<Integer> currentPIDs = new ArrayList<Integer>();
+ currentPIDs.addAll(processTree.keySet());
+ return currentPIDs;
+ }
+
+ /**
+ * Get a dump of the process-tree.
+ *
+ * @return a string concatenating the dump of information of all the processes
+ * in the process-tree
+ */
+ public String getProcessTreeDump() {
+ StringBuilder ret = new StringBuilder();
+ // The header.
+ ret.append(String.format("\t|- PID PPID PGRPID SESSID CMD_NAME "
+ + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) "
+ + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
+ for (ProcessInfo p : processTree.values()) {
+ if (p != null) {
+ ret.append(String.format(PROCESSTREE_DUMP_FORMAT, p.getPid(), p
+ .getPpid(), p.getPgrpId(), p.getSessionId(), p.getName(), p
+ .getUtime(), p.getStime(), p.getVmem(), p.getRssmemPage(), p
+ .getCmdLine(procfsDir)));
+ }
+ }
+ return ret.toString();
+ }
+
+ /**
+ * Get the cumulative virtual memory used by all the processes in the
+ * process-tree.
+ *
+ * @return cumulative virtual memory used by the process-tree in bytes.
+ */
+ public long getCumulativeVmem() {
+    // Include all processes; all processes will be older than 0.
+ return getCumulativeVmem(0);
+ }
+
+ /**
+ * Get the cumulative resident set size (rss) memory used by all the processes
+ * in the process-tree.
+ *
+ * @return cumulative rss memory used by the process-tree in bytes. return 0
+ * if it cannot be calculated
+ */
+ public long getCumulativeRssmem() {
+    // Include all processes; all processes will be older than 0.
+ return getCumulativeRssmem(0);
+ }
+
+ /**
+ * Get the cumulative virtual memory used by all the processes in the
+ * process-tree that are older than the passed in age.
+ *
+ * @param olderThanAge processes above this age are included in the
+ * memory addition
+ * @return cumulative virtual memory used by the process-tree in bytes,
+ * for processes older than this age.
+ */
+ public long getCumulativeVmem(int olderThanAge) {
+ long total = 0;
+ for (ProcessInfo p : processTree.values()) {
+ if ((p != null) && (p.getAge() > olderThanAge)) {
+ total += p.getVmem();
+ }
+ }
+ return total;
+ }
+
+ /**
+ * Get the cumulative resident set size (rss) memory used by all the processes
+ * in the process-tree that are older than the passed in age.
+ *
+ * @param olderThanAge processes above this age are included in the
+ * memory addition
+ * @return cumulative rss memory used by the process-tree in bytes,
+ * for processes older than this age. return 0 if it cannot be
+ * calculated
+ */
+ public long getCumulativeRssmem(int olderThanAge) {
+ if (PAGE_SIZE < 0) {
+ return 0;
+ }
+ long totalPages = 0;
+ for (ProcessInfo p : processTree.values()) {
+ if ((p != null) && (p.getAge() > olderThanAge)) {
+ totalPages += p.getRssmemPage();
+ }
+ }
+    return totalPages * PAGE_SIZE; // convert number of pages to bytes
+ }
+
+ /**
+   * Get the CPU time in milliseconds used by all the processes in the
+   * process-tree since the process-tree was created.
+   *
+   * @return cumulative CPU time in milliseconds since the process-tree was
+   *         created; return 0 if it cannot be calculated
+ */
+ public long getCumulativeCpuTime() {
+ if (JIFFY_LENGTH_IN_MILLIS < 0) {
+ return 0;
+ }
+ long incJiffies = 0;
+ for (ProcessInfo p : processTree.values()) {
+ if (p != null) {
+ incJiffies += p.dtime;
+ }
+ }
+ cpuTime += incJiffies * JIFFY_LENGTH_IN_MILLIS;
+ return cpuTime;
+ }
+
+ private static Integer getValidPID(String pid) {
+ Integer retPid = -1;
+ try {
+ retPid = Integer.parseInt(pid);
+ if (retPid <= 0) {
+ retPid = -1;
+ }
+ } catch (NumberFormatException nfe) {
+ retPid = -1;
+ }
+ return retPid;
+ }
+
+ /**
+ * Get the list of all processes in the system.
+ */
+ private List<Integer> getProcessList() {
+ String[] processDirs = (new File(procfsDir)).list();
+ List<Integer> processList = new ArrayList<Integer>();
+
+ for (String dir : processDirs) {
+ try {
+ int pd = Integer.parseInt(dir);
+ if ((new File(procfsDir, dir)).isDirectory()) {
+ processList.add(Integer.valueOf(pd));
+ }
+ } catch (NumberFormatException n) {
+ // skip this directory
+ } catch (SecurityException s) {
+ // skip this process
+ }
+ }
+ return processList;
+ }
+
+ /**
+ * Construct the ProcessInfo using the process' PID and procfs rooted at the
+   * specified directory and return the same. It is provided mainly to
+   * assist testing.
+   *
+   * Returns null on failing to read from procfs.
+ *
+ * @param pinfo ProcessInfo that needs to be updated
+ * @param procfsDir root of the proc file system
+ * @return updated ProcessInfo, null on errors.
+ */
+ private static ProcessInfo constructProcessInfo(ProcessInfo pinfo,
+ String procfsDir) {
+ ProcessInfo ret = null;
+ // Read "procfsDir/<pid>/stat" file - typically /proc/<pid>/stat
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ File pidDir = new File(procfsDir, String.valueOf(pinfo.getPid()));
+ fReader = new FileReader(new File(pidDir, PROCFS_STAT_FILE));
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // The process vanished in the interim!
+ LOG.warn("The process " + pinfo.getPid()
+ + " may have finished in the interim.");
+ return ret;
+ }
+
+ ret = pinfo;
+ try {
+ String str = in.readLine(); // only one line
+ Matcher m = PROCFS_STAT_FILE_FORMAT.matcher(str);
+ boolean mat = m.find();
+ if (mat) {
+ // Set (name) (ppid) (pgrpId) (session) (utime) (stime) (vsize) (rss)
+ pinfo.updateProcessInfo(m.group(2), Integer.parseInt(m.group(3)),
+ Integer.parseInt(m.group(4)), Integer.parseInt(m.group(5)),
+ Long.parseLong(m.group(7)), Long.parseLong(m.group(8)),
+ Long.parseLong(m.group(10)), Long.parseLong(m.group(11)));
+ } else {
+ LOG.warn("Unexpected: procfs stat file is not in the expected format"
+ + " for process with pid " + pinfo.getPid());
+ ret = null;
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ ret = null;
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+
+ return ret;
+ }
+
+  /**
+ * Returns a string printing PIDs of process present in the
+ * ProcfsBasedProcessTree. Output format : [pid pid ..]
+ */
+  @Override
+  public String toString() {
+    StringBuilder pTree = new StringBuilder("[ ");
+    for (Integer p : processTree.keySet()) {
+      pTree.append(p);
+      pTree.append(" ");
+    }
+    return pTree.append("]").toString();
+  }
+
+ /**
+ *
+ * Class containing information of a process.
+ *
+ */
+ private static class ProcessInfo {
+ private Integer pid; // process-id
+ private String name; // command name
+ private Integer pgrpId; // process group-id
+ private Integer ppid; // parent process-id
+ private Integer sessionId; // session-id
+ private Long vmem; // virtual memory usage
+ private Long rssmemPage; // rss memory usage in # of pages
+ private Long utime = 0L; // # of jiffies in user mode
+ private Long stime = 0L; // # of jiffies in kernel mode
+ // how many times has this process been seen alive
+ private int age;
+
+ // # of jiffies used since last update:
+ private Long dtime = 0L;
+ // dtime = (utime + stime) - (utimeOld + stimeOld)
+ // We need this to compute the cumulative CPU time
+ // because the subprocess may finish earlier than root process
+
+ private List<ProcessInfo> children = new ArrayList<ProcessInfo>(); // list of children
+
+ public ProcessInfo(int pid) {
+ this.pid = Integer.valueOf(pid);
+ // seeing this the first time.
+ this.age = 1;
+ }
+
+ public Integer getPid() {
+ return pid;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public Integer getPgrpId() {
+ return pgrpId;
+ }
+
+ public Integer getPpid() {
+ return ppid;
+ }
+
+ public Integer getSessionId() {
+ return sessionId;
+ }
+
+ public Long getVmem() {
+ return vmem;
+ }
+
+ public Long getUtime() {
+ return utime;
+ }
+
+ public Long getStime() {
+ return stime;
+ }
+
+ public Long getDtime() {
+ return dtime;
+ }
+
+ public Long getRssmemPage() { // get rss # of pages
+ return rssmemPage;
+ }
+
+ public int getAge() {
+ return age;
+ }
+
+    public boolean isParent(ProcessInfo p) {
+      return pid.equals(p.getPpid());
+    }
+
+ public void updateProcessInfo(String name, Integer ppid, Integer pgrpId,
+ Integer sessionId, Long utime, Long stime, Long vmem, Long rssmem) {
+ this.name = name;
+ this.ppid = ppid;
+ this.pgrpId = pgrpId;
+ this.sessionId = sessionId;
+ this.utime = utime;
+ this.stime = stime;
+ this.vmem = vmem;
+ this.rssmemPage = rssmem;
+ }
+
+ public void updateJiffy(ProcessInfo oldInfo) {
+ this.dtime = (oldInfo == null ? this.utime + this.stime
+ : (this.utime + this.stime) - (oldInfo.utime + oldInfo.stime));
+ }
+
+ public void updateAge(ProcessInfo oldInfo) {
+ this.age = oldInfo.age + 1;
+ }
+
+ public boolean addChild(ProcessInfo p) {
+ return children.add(p);
+ }
+
+ public List<ProcessInfo> getChildren() {
+ return children;
+ }
+
+ public String getCmdLine(String procfsDir) {
+ String ret = "N/A";
+ if (pid == null) {
+ return ret;
+ }
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader =
+ new FileReader(new File(new File(procfsDir, pid.toString()),
+ PROCFS_CMDLINE_FILE));
+ } catch (FileNotFoundException f) {
+ // The process vanished in the interim!
+ return ret;
+ }
+
+ in = new BufferedReader(fReader);
+
+ try {
+ ret = in.readLine(); // only one line
+ if (ret == null) {
+ ret = "N/A";
+ } else {
+ ret = ret.replace('\0', ' '); // Replace each null char with a space
+ if (ret.equals("")) {
+ // The cmdline might be empty because the process is swapped out or
+ // is a zombie.
+ ret = "N/A";
+ }
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ ret = "N/A";
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+
+ return ret;
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
new file mode 100644
index 0000000..fe6471d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
@@ -0,0 +1,94 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.util;
+
+import java.lang.reflect.Constructor;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.net.ScriptBasedMapping;
+
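+/**
+ * Resolves host names to locations in the network topology using the
+ * configured {@link DNSToSwitchMapping} implementation. Illustrative
+ * usage (the host name below is an assumption for the example, not part
+ * of this patch):
+ * <pre>
+ *   Node node = RackResolver.resolve(new Configuration(), "host1.example.com");
+ *   String rack = node.getNetworkLocation(); // e.g. "/default-rack"
+ * </pre>
+ */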
+public class RackResolver {
+ private static DNSToSwitchMapping dnsToSwitchMapping;
+ private static boolean initCalled = false;
+ private static final Log LOG = LogFactory.getLog(RackResolver.class);
+
+ public synchronized static void init(Configuration conf) {
+ if (initCalled) {
+ return;
+ } else {
+ initCalled = true;
+ }
+ Class<? extends DNSToSwitchMapping> dnsToSwitchMappingClass =
+ conf.getClass(
+ CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+ ScriptBasedMapping.class,
+ DNSToSwitchMapping.class);
+ try {
+ Constructor<? extends DNSToSwitchMapping> dnsToSwitchMappingConstructor
+ = dnsToSwitchMappingClass.getConstructor();
+ dnsToSwitchMapping = dnsToSwitchMappingConstructor.newInstance();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ /**
+ * Utility method for getting a hostname resolved to a node in the
+ * network topology. This method initializes the class with the
+ * right resolver implementation.
+ * @param conf the configuration to use for looking up the resolver
+ * @param hostName the host name to resolve
+ * @return the node in the topology corresponding to the host name
+ */
+ public static Node resolve(Configuration conf, String hostName) {
+ init(conf);
+ return coreResolve(hostName);
+ }
+
+ /**
+ * Utility method for getting a hostname resolved to a node in the
+ * network topology. This method doesn't initialize the class.
+ * Call {@link #init(Configuration)} explicitly.
+ * @param hostName the host name to resolve
+ * @return the node in the topology corresponding to the host name
+ */
+ public static Node resolve(String hostName) {
+ if (!initCalled) {
+ throw new IllegalStateException("RackResolver class not yet initialized");
+ }
+ return coreResolve(hostName);
+ }
+
+ private static Node coreResolve(String hostName) {
+ List<String> tmpList = new ArrayList<String>(1);
+ tmpList.add(hostName);
+ List<String> rNameList = dnsToSwitchMapping.resolve(tmpList);
+ String rName = rNameList.get(0);
+ LOG.info("Resolved " + hostName + " to " + rName);
+ return new NodeBase(hostName, rName);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Records.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Records.java
new file mode 100644
index 0000000..60711fd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Records.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.util;
+
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+/**
+ * Convenience utilities for creating API records.
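+ * Illustrative usage ({@code ApplicationId} is one of the YARN API record
+ * types, assumed here just for the example):
+ * <pre>
+ *   ApplicationId appId = Records.newRecord(ApplicationId.class);
+ * </pre>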
+ */
+public class Records {
+ // The default record factory
+ private static final RecordFactory factory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ public static <T> T newRecord(Class<T> cls) {
+ return factory.newRecordInstance(cls);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
new file mode 100644
index 0000000..b588395
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * Plugin to calculate resource information on the system.
+ *
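+ * <p>Illustrative usage, a sketch only ({@code conf} is assumed to be an
+ * existing {@link Configuration}):
+ * <pre>
+ *   ResourceCalculatorPlugin plugin =
+ *       ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
+ *   if (plugin != null) { // null when the platform is not supported
+ *     long totalVmem = plugin.getVirtualMemorySize();
+ *   }
+ * </pre>
+ *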
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public abstract class ResourceCalculatorPlugin extends Configured {
+
+ /**
+ * Obtain the total size of the virtual memory present in the system.
+ *
+ * @return virtual memory size in bytes.
+ */
+ public abstract long getVirtualMemorySize();
+
+ /**
+ * Obtain the total size of the physical memory present in the system.
+ *
+ * @return physical memory size in bytes.
+ */
+ public abstract long getPhysicalMemorySize();
+
+ /**
+ * Obtain the total size of the available virtual memory present
+ * in the system.
+ *
+ * @return available virtual memory size in bytes.
+ */
+ public abstract long getAvailableVirtualMemorySize();
+
+ /**
+ * Obtain the total size of the available physical memory present
+ * in the system.
+ *
+ * @return available physical memory size in bytes.
+ */
+ public abstract long getAvailablePhysicalMemorySize();
+
+ /**
+ * Obtain the total number of processors present on the system.
+ *
+ * @return number of processors
+ */
+ public abstract int getNumProcessors();
+
+ /**
+ * Obtain the CPU frequency of the system.
+ *
+ * @return CPU frequency in kHz
+ */
+ public abstract long getCpuFrequency();
+
+ /**
+ * Obtain the cumulative CPU time since the system was started.
+ *
+ * @return cumulative CPU time in milliseconds
+ */
+ public abstract long getCumulativeCpuTime();
+
+ /**
+ * Obtain the CPU usage % of the machine. Return -1 if it is unavailable
+ *
+ * @return CPU usage in %
+ */
+ public abstract float getCpuUsage();
+
+ /**
+ * Obtain resource status used by current process tree.
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public abstract ProcResourceValues getProcResourceValues();
+
+ public class ProcResourceValues {
+ private final long cumulativeCpuTime;
+ private final long physicalMemorySize;
+ private final long virtualMemorySize;
+ public ProcResourceValues(long cumulativeCpuTime, long physicalMemorySize,
+ long virtualMemorySize) {
+ this.cumulativeCpuTime = cumulativeCpuTime;
+ this.physicalMemorySize = physicalMemorySize;
+ this.virtualMemorySize = virtualMemorySize;
+ }
+ /**
+ * Obtain the physical memory size used by current process tree.
+ * @return physical memory size in bytes.
+ */
+ public long getPhysicalMemorySize() {
+ return physicalMemorySize;
+ }
+
+ /**
+ * Obtain the virtual memory size used by the current process tree.
+ * @return virtual memory size in bytes.
+ */
+ public long getVirtualMemorySize() {
+ return virtualMemorySize;
+ }
+
+ /**
+ * Obtain the cumulative CPU time used by the current process tree.
+ * @return cumulative CPU time in milliseconds
+ */
+ public long getCumulativeCpuTime() {
+ return cumulativeCpuTime;
+ }
+ }
+
+ /**
+ * Get the ResourceCalculatorPlugin from the class name and configure it. If
+ * the class name is null, this method will try to return a memory calculator
+ * plugin available for this system.
+ *
+ * @param clazz class-name
+ * @param conf configure the plugin with this.
+ * @return ResourceCalculatorPlugin
+ */
+ public static ResourceCalculatorPlugin getResourceCalculatorPlugin(
+ Class<? extends ResourceCalculatorPlugin> clazz, Configuration conf) {
+
+ if (clazz != null) {
+ return ReflectionUtils.newInstance(clazz, conf);
+ }
+
+ // No class given, try an OS-specific class
+ try {
+ String osName = System.getProperty("os.name");
+ if (osName.startsWith("Linux")) {
+ return new LinuxResourceCalculatorPlugin();
+ }
+ } catch (SecurityException se) {
+ // Failed to get Operating System name.
+ return null;
+ }
+
+ // Not supported on this system.
+ return null;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Self.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Self.java
new file mode 100644
index 0000000..a656c47
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Self.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.util;
+
+/**
+ * Some utilities for introspection
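+ * e.g. {@code Self.isUnitTest()} returns true when the call stack contains
+ * a JUnit or maven-surefire frame; {@code Self.isJUnitTest()} is true only
+ * for the JUnit case.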
+ */
+public class Self {
+ private static boolean firstTime = true;
+ private static boolean isUnitTest = false;
+ private static boolean isJUnitTest = false;
+
+ public synchronized static boolean isUnitTest() {
+ detect();
+ return isUnitTest;
+ }
+
+ public synchronized static boolean isJUnitTest() {
+ detect();
+ return isJUnitTest;
+ }
+
+ private synchronized static void detect() {
+ if (!firstTime) {
+ return;
+ }
+ firstTime = false;
+ for (StackTraceElement e : new Throwable().getStackTrace()) {
+ String className = e.getClassName();
+ if (className.startsWith("org.junit")) {
+ isUnitTest = isJUnitTest = true;
+ return;
+ }
+ if (className.startsWith("org.apache.maven.surefire")) {
+ isUnitTest = true;
+ return;
+ }
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
new file mode 100644
index 0000000..88f7e2b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
@@ -0,0 +1,169 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.util;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Splitter;
+import java.util.regex.Pattern;
+
+/**
+ * Common string manipulation helpers
+ */
+public final class StringHelper {
+ // Common joiners to avoid per join creation of joiners
+ public static final Joiner SSV_JOINER = Joiner.on(' ');
+ public static final Joiner CSV_JOINER = Joiner.on(',');
+ public static final Joiner JOINER = Joiner.on("");
+ public static final Joiner _JOINER = Joiner.on('_');
+ public static final Joiner PATH_JOINER = Joiner.on('/');
+ public static final Joiner PATH_ARG_JOINER = Joiner.on("/:");
+ public static final Joiner DOT_JOINER = Joiner.on('.');
+ public static final Splitter SSV_SPLITTER =
+ Splitter.on(' ').omitEmptyStrings().trimResults();
+ public static final Splitter _SPLITTER = Splitter.on('_').trimResults();
+ private static final Pattern ABS_URL_RE = Pattern.compile("^(?:\\w+:)?//");
+
+ /**
+ * Join on space.
+ * @param args to join
+ * @return args joined by space
+ */
+ public static String sjoin(Object... args) {
+ return SSV_JOINER.join(args);
+ }
+
+ /**
+ * Join on comma.
+ * @param args to join
+ * @return args joined by comma
+ */
+ public static String cjoin(Object... args) {
+ return CSV_JOINER.join(args);
+ }
+
+ /**
+ * Join on dot
+ * @param args to join
+ * @return args joined by dot
+ */
+ public static String djoin(Object... args) {
+ return DOT_JOINER.join(args);
+ }
+
+ /**
+ * Join on underscore
+ * @param args to join
+ * @return args joined by underscore
+ */
+ public static String _join(Object... args) {
+ return _JOINER.join(args);
+ }
+
+ /**
+ * Join on slash
+ * @param args to join
+ * @return args joined with slash
+ */
+ public static String pjoin(Object... args) {
+ return PATH_JOINER.join(args);
+ }
+
+ /**
+ * Join on slash & colon (e.g., path args in routing spec)
+ * @param args to join
+ * @return args joined with /:
+ */
+ public static String pajoin(Object... args) {
+ return PATH_ARG_JOINER.join(args);
+ }
+
+ /**
+ * Join without separator
+ * @param args to join
+ * @return joined args with no separator
+ */
+ public static String join(Object... args) {
+ return JOINER.join(args);
+ }
+
+ /**
+ * Join with a separator
+ * @param sep the separator
+ * @param args to join
+ * @return args joined with a separator
+ */
+ public static String joins(String sep, Object...args) {
+ return Joiner.on(sep).join(args);
+ }
+
+ /**
+ * Split on space & trim results.
+ * @param s the string to split
+ * @return an iterable of strings
+ */
+ public static Iterable<String> split(CharSequence s) {
+ return SSV_SPLITTER.split(s);
+ }
+
+ /**
+ * Split on _ & trim results
+ * @param s the string to split
+ * @return an iterable of strings
+ */
+ public static Iterable<String> _split(CharSequence s) {
+ return _SPLITTER.split(s);
+ }
+
+ /**
+ * Check whether a URL is absolute or not
+ * @param url to check
+ * @return true if url starts with scheme:// or //
+ */
+ public static boolean isAbsUrl(CharSequence url) {
+ return ABS_URL_RE.matcher(url).find();
+ }
+
+ /**
+ * Join URL components.
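+ * e.g. (illustrative) {@code ujoin("app", "job", "job_1")} returns
+ * {@code "/app/job/job_1"}; a first component that is absolute or starts
+ * with '#' is kept as-is.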
+ * @param pathPrefix prefix prepended to relative URLs
+ * @param args URL components to join
+ * @return a URL string
+ */
+ public static String ujoin(String pathPrefix, String... args) {
+ StringBuilder sb = new StringBuilder();
+ boolean first = true;
+ for (String part : args) {
+ if (first) {
+ first = false;
+ if (part.startsWith("#") || isAbsUrl(part)) {
+ sb.append(part);
+ } else {
+ sb.append('/').append(pathPrefix).append('/').append(part);
+ }
+ } else {
+ sb.append('/').append(part);
+ }
+ }
+ return sb.toString();
+ }
+
+ public static String percent(double value) {
+ return String.format("%.2f", value * 100);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
new file mode 100644
index 0000000..dd23b3e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.util;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
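+/**
+ * Start/finish time helpers. Illustrative behaviour (example values):
+ * <pre>
+ *   Times.elapsed(1000L, 5000L); // 4000: finish time is known
+ *   Times.elapsed(1000L, 0L);    // now - 1000: still running
+ *   Times.format(0L);            // "N/A": timestamp not set
+ * </pre>
+ */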
+public class Times {
+ static final ThreadLocal<SimpleDateFormat> dateFormat =
+ new ThreadLocal<SimpleDateFormat>() {
+ @Override protected SimpleDateFormat initialValue() {
+ return new SimpleDateFormat("d-MMM-yyyy HH:mm:ss");
+ }
+ };
+
+ public static long elapsed(long started, long finished) {
+ if (finished > 0) {
+ return finished - started;
+ }
+ return started > 0 ? System.currentTimeMillis() - started : 0;
+ }
+
+ public static String format(long ts) {
+ return ts > 0 ? String.valueOf(dateFormat.get().format(new Date(ts)))
+ : "N/A";
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
new file mode 100644
index 0000000..2aa67eb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.YarnVersionAnnotation;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This class finds the package info for Yarn and the YarnVersionAnnotation
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class YarnVersionInfo {
+ private static final Log LOG = LogFactory.getLog(YarnVersionInfo.class);
+
+ private static Package myPackage;
+ private static YarnVersionAnnotation version;
+
+ static {
+ myPackage = YarnVersionAnnotation.class.getPackage();
+ version = myPackage.getAnnotation(YarnVersionAnnotation.class);
+ }
+
+ /**
+ * Get the meta-data for the Yarn package.
+ * @return the Java package that contains the Yarn version annotation
+ */
+ static Package getPackage() {
+ return myPackage;
+ }
+
+ /**
+ * Get the Yarn version.
+ * @return the Yarn version string, e.g. "0.6.3-dev"
+ */
+ public static String getVersion() {
+ return version != null ? version.version() : "Unknown";
+ }
+
+ /**
+ * Get the subversion revision number for the root directory.
+ * @return the revision number, e.g. "451451"
+ */
+ public static String getRevision() {
+ return version != null ? version.revision() : "Unknown";
+ }
+
+ /**
+ * Get the branch on which this originated.
+ * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
+ */
+ public static String getBranch() {
+ return version != null ? version.branch() : "Unknown";
+ }
+
+ /**
+ * The date that Yarn was compiled.
+ * @return the compilation date in unix date format
+ */
+ public static String getDate() {
+ return version != null ? version.date() : "Unknown";
+ }
+
+ /**
+ * The user that compiled Yarn.
+ * @return the username of the user
+ */
+ public static String getUser() {
+ return version != null ? version.user() : "Unknown";
+ }
+
+ /**
+ * Get the subversion URL for the root Yarn directory.
+ */
+ public static String getUrl() {
+ return version != null ? version.url() : "Unknown";
+ }
+
+ /**
+ * Get the checksum of the source files from which Yarn was
+ * built.
+ **/
+ public static String getSrcChecksum() {
+ return version != null ? version.srcChecksum() : "Unknown";
+ }
+
+ /**
+ * Returns the buildVersion which includes version,
+ * revision, user and date.
+ */
+ public static String getBuildVersion(){
+ return YarnVersionInfo.getVersion() +
+ " from " + YarnVersionInfo.getRevision() +
+ " by " + YarnVersionInfo.getUser() +
+ " source checksum " + YarnVersionInfo.getSrcChecksum();
+ }
+
+ public static void main(String[] args) {
+ LOG.debug("version: "+ version);
+ System.out.println("Yarn " + getVersion());
+ System.out.println("Subversion " + getUrl() + " -r " + getRevision());
+ System.out.println("Compiled by " + getUser() + " on " + getDate());
+ System.out.println("From source with checksum " + getSrcChecksum());
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
new file mode 100644
index 0000000..beec6fd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
@@ -0,0 +1,252 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import com.google.common.collect.Maps;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.servlet.RequestScoped;
+
+import java.io.PrintWriter;
+import java.util.Map;
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+
+import org.apache.hadoop.yarn.webapp.view.DefaultPage;
+
+import org.codehaus.jackson.map.ObjectMapper;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
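+/**
+ * Base class for webapp controllers. A minimal subclass (hypothetical,
+ * shown only for illustration) might look like:
+ * <pre>
+ *   public class HelloController extends Controller {
+ *     public void index() {
+ *       renderText("Hello, world!");
+ *     }
+ *   }
+ * </pre>
+ */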
+public abstract class Controller implements Params {
+ public static final Logger LOG = LoggerFactory.getLogger(Controller.class);
+ static final ObjectMapper jsonMapper = new ObjectMapper();
+
+ @RequestScoped
+ public static class RequestContext{
+ final Injector injector;
+ final HttpServletRequest request;
+ final HttpServletResponse response;
+ private Map<String, String> moreParams;
+ private Map<String, Cookie> cookies;
+ int status = 200; // pre 3.0 servlet-api doesn't have getStatus
+ boolean rendered = false;
+ Throwable error;
+ boolean devMode = false;
+ String prefix;
+
+ @Inject RequestContext(Injector injector, HttpServletRequest request,
+ HttpServletResponse response) {
+ this.injector = injector;
+ this.request = request;
+ this.response = response;
+ }
+
+ public int status() { return status; }
+
+ public void setStatus(int status) {
+ this.status = status;
+ response.setStatus(status);
+ }
+
+ public void setRendered(boolean rendered) {
+ this.rendered = rendered;
+ }
+
+ public Map<String, String> moreParams() {
+ if (moreParams == null) {
+ moreParams = Maps.newHashMap();
+ }
+ return moreParams; // OK
+ }
+
+ public Map<String, Cookie> cookies() {
+ if (cookies == null) {
+ cookies = Maps.newHashMap();
+ Cookie[] rcookies = request.getCookies();
+ if (rcookies != null) {
+ for (Cookie cookie : rcookies) {
+ cookies.put(cookie.getName(), cookie);
+ }
+ }
+ }
+ return cookies; // OK
+ }
+
+ public void set(String key, String value) {
+ moreParams().put(key, value);
+ }
+
+ public String get(String key, String defaultValue) {
+ String value = moreParams().get(key);
+ if (value == null) {
+ value = request.getParameter(key);
+ }
+ return value == null ? defaultValue : value;
+ }
+
+ public String prefix() { return prefix; }
+ }
+
+ private RequestContext context;
+ @Inject Injector injector;
+
+ public Controller() {
+ // Makes injection in subclasses optional.
+ // Time will tell if this buys us more than the NPEs :)
+ }
+
+ public Controller(RequestContext ctx) {
+ context = ctx;
+ }
+
+ public RequestContext context() {
+ if (context == null) {
+ if (injector == null) {
+ // One of the downsides of making injection in subclasses optional.
+ throw new WebAppException(join("Error accessing RequestContext from\n",
+ "a child constructor, either move the usage of the Controller\n",
+ "methods out of the constructor or inject the RequestContext\n",
+ "into the constructor"));
+ }
+ context = injector.getInstance(RequestContext.class);
+ }
+ return context;
+ }
+
+ public Throwable error() { return context().error; }
+
+ public int status() { return context().status; }
+
+ public void setStatus(int status) {
+ context().setStatus(status);
+ }
+
+ public boolean inDevMode() { return context().devMode; }
+
+ public Injector injector() { return context().injector; }
+
+ public <T> T getInstance(Class<T> cls) {
+ return injector.getInstance(cls);
+ }
+
+ public HttpServletRequest request() { return context().request; }
+
+ public HttpServletResponse response() { return context().response; }
+
+ public void set(String key, String value) {
+ context().set(key, value);
+ }
+
+ public String get(String key, String defaultValue) {
+ return context().get(key, defaultValue);
+ }
+
+ public String $(String key) {
+ return get(key, "");
+ }
+
+ public void setTitle(String title) {
+ set(TITLE, title);
+ }
+
+ public void setTitle(String title, String url) {
+ setTitle(title);
+ set(TITLE_LINK, url);
+ }
+
+ public ResponseInfo info(String about) {
+ return getInstance(ResponseInfo.class).about(about);
+ }
+
+ /**
+ * Get the cookies
+ * @return the cookies map
+ */
+ public Map<String, Cookie> cookies() {
+ return context().cookies();
+ }
+
+ /**
+ * Create a URL from URL components.
+ * @param parts components to join
+ * @return a URL string
+ */
+ public String url(String... parts) {
+ return ujoin(context().prefix, parts);
+ }
+
+ /**
+ * The default action.
+ */
+ public abstract void index();
+
+ public void echo() {
+ render(DefaultPage.class);
+ }
+
+ protected void render(Class<? extends View> cls) {
+ context().rendered = true;
+ getInstance(cls).render();
+ }
+
+ /**
+ * Convenience method for REST APIs (without explicit views)
+ * @param object - the object as the response (in JSON)
+ */
+ protected void renderJSON(Object object) {
+ LOG.debug("{}: {}", MimeType.JSON, object);
+ context().rendered = true;
+ context().response.setContentType(MimeType.JSON);
+ try {
+ jsonMapper.writeValue(writer(), object);
+ } catch (Exception e) {
+ throw new WebAppException(e);
+ }
+ }
+
+ protected void renderJSON(Class<? extends ToJSON> cls) {
+ context().rendered = true;
+ response().setContentType(MimeType.JSON);
+ getInstance(cls).toJSON(writer());
+ }
+
+ /**
+ * Convenience method for hello world :)
+ * @param s - the content to render as plain text
+ */
+ protected void renderText(String s) {
+ LOG.debug("{}: {}", MimeType.TEXT, s);
+ context().rendered = true;
+ response().setContentType(MimeType.TEXT);
+ writer().print(s);
+ }
+
+ protected PrintWriter writer() {
+ try {
+ return response().getWriter();
+ } catch (Exception e) {
+ throw new WebAppException(e);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
new file mode 100644
index 0000000..ef8ab97
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
@@ -0,0 +1,229 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import static com.google.common.base.Preconditions.*;
+import com.google.common.collect.Iterables;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Singleton;
+
+import java.io.IOException;
+import java.util.Timer;
+import java.util.TimerTask;
+import javax.servlet.ServletException;
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
+import org.apache.hadoop.yarn.webapp.Router.Dest;
+import org.apache.hadoop.yarn.webapp.view.ErrorPage;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The servlet that dispatches requests to the various controllers
+ * according to the user-defined routes in the router.
+ */
+@Singleton
+public class Dispatcher extends HttpServlet {
+ private static final long serialVersionUID = 1L;
+ static final Logger LOG = LoggerFactory.getLogger(Dispatcher.class);
+ static final String ERROR_COOKIE = "last-error";
+ static final String STATUS_COOKIE = "last-status";
+
+ private transient final Injector injector;
+ private transient final Router router;
+ private transient final WebApp webApp;
+ private volatile boolean devMode = false;
+
+ @Inject
+ Dispatcher(WebApp webApp, Injector injector, Router router) {
+ this.webApp = webApp;
+ this.injector = injector;
+ this.router = router;
+ }
+
+ @Override
+ public void doOptions(HttpServletRequest req, HttpServletResponse res) {
+ // for simplicity
+ res.setHeader("Allow", "GET, POST");
+ }
+
+ @Override
+ public void service(HttpServletRequest req, HttpServletResponse res)
+ throws ServletException, IOException {
+ res.setCharacterEncoding("UTF-8");
+ String uri = req.getRequestURI();
+ if (uri == null) {
+ uri = "/";
+ }
+ if (devMode && uri.equals("/__stop")) {
+ // quick hack to restart servers in dev mode without OS commands
+ res.setStatus(res.SC_NO_CONTENT);
+ LOG.info("dev mode restart requested");
+ prepareToExit();
+ return;
+ }
+ String method = req.getMethod();
+ if (method.equals("OPTIONS")) {
+ doOptions(req, res);
+ return;
+ }
+ if (method.equals("TRACE")) {
+ doTrace(req, res);
+ return;
+ }
+ if (method.equals("HEAD")) {
+ doGet(req, res); // default to bad request
+ return;
+ }
+ String pathInfo = req.getPathInfo();
+ if (pathInfo == null) {
+ pathInfo = "/";
+ }
+ Controller.RequestContext rc =
+ injector.getInstance(Controller.RequestContext.class);
+ if (setCookieParams(rc, req) > 0) {
+ Cookie ec = rc.cookies().get(ERROR_COOKIE);
+ if (ec != null) {
+ rc.setStatus(Integer.parseInt(rc.cookies().
+ get(STATUS_COOKIE).getValue()));
+ removeErrorCookies(res, uri);
+ rc.set(Params.ERROR_DETAILS, ec.getValue());
+ render(ErrorPage.class);
+ return;
+ }
+ }
+ rc.prefix = webApp.name();
+ Router.Dest dest = null;
+ try {
+ dest = router.resolve(method, pathInfo);
+ } catch (WebAppException e) {
+ rc.error = e;
+ if (!e.getMessage().contains("not found")) {
+ rc.setStatus(res.SC_INTERNAL_SERVER_ERROR);
+ render(ErrorPage.class);
+ return;
+ }
+ }
+ if (dest == null) {
+ rc.setStatus(res.SC_NOT_FOUND);
+ render(ErrorPage.class);
+ return;
+ }
+ rc.devMode = devMode;
+ setMoreParams(rc, pathInfo, dest);
+ Controller controller = injector.getInstance(dest.controllerClass);
+ try {
+ // TODO: support args converted from /path/:arg1/...
+ dest.action.invoke(controller, (Object[]) null);
+ if (!rc.rendered) {
+ if (dest.defaultViewClass != null) {
+ render(dest.defaultViewClass);
+ } else if (rc.status == 200) {
+ throw new IllegalStateException("No view rendered for 200");
+ }
+ }
+ } catch (Exception e) {
+ LOG.error("error handling URI: "+ uri, e);
+ // Page could be half rendered (but still not flushed). So redirect.
+ redirectToErrorPage(res, e, uri, devMode);
+ }
+ }
+
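+ // Stash the error details in cookies and redirect back to the same URI;
+ // the next request then renders ErrorPage from those cookies (see the
+ // ERROR_COOKIE handling in service() above).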
+ public static void redirectToErrorPage(HttpServletResponse res, Throwable e,
+ String path, boolean devMode) {
+ String st = devMode ? ErrorPage.toStackTrace(e, 1024 * 3) // spec: min 4KB
+ : "See logs for stack trace";
+ res.setStatus(res.SC_FOUND);
+ Cookie cookie = new Cookie(STATUS_COOKIE, String.valueOf(500));
+ cookie.setPath(path);
+ res.addCookie(cookie);
+ cookie = new Cookie(ERROR_COOKIE, st);
+ cookie.setPath(path);
+ res.addCookie(cookie);
+ res.setHeader("Location", path);
+ }
+
+ public static void removeErrorCookies(HttpServletResponse res, String path) {
+ removeCookie(res, ERROR_COOKIE, path);
+ removeCookie(res, STATUS_COOKIE, path);
+ }
+
+ public static void removeCookie(HttpServletResponse res, String name,
+ String path) {
+ LOG.debug("removing cookie {} on {}", name, path);
+ Cookie c = new Cookie(name, "");
+ c.setMaxAge(0);
+ c.setPath(path);
+ res.addCookie(c);
+ }
+
+ private void render(Class<? extends View> cls) {
+ injector.getInstance(cls).render();
+ }
+
+ // /path/foo/bar with /path/:arg1/:arg2 will set {arg1=>foo, arg2=>bar}
+ private void setMoreParams(RequestContext rc, String pathInfo, Dest dest) {
+ checkState(pathInfo.startsWith(dest.prefix), "prefix should match");
+ if (dest.pathParams.size() == 0 ||
+ dest.prefix.length() == pathInfo.length()) {
+ return;
+ }
+ String[] parts = Iterables.toArray(WebApp.pathSplitter.split(
+ pathInfo.substring(dest.prefix.length())), String.class);
+ LOG.debug("parts={}, params={}", parts, dest.pathParams);
+ for (int i = 0; i < dest.pathParams.size() && i < parts.length; ++i) {
+ String key = dest.pathParams.get(i);
+ if (key.charAt(0) == ':') {
+ rc.moreParams().put(key.substring(1), parts[i]);
+ }
+ }
+ }
+
+ private int setCookieParams(RequestContext rc, HttpServletRequest req) {
+ Cookie[] cookies = req.getCookies();
+ if (cookies != null) {
+ for (Cookie cookie : cookies) {
+ rc.cookies().put(cookie.getName(), cookie);
+ }
+ return cookies.length;
+ }
+ return 0;
+ }
+
+ public void setDevMode(boolean choice) {
+ devMode = choice;
+ }
+
+ private void prepareToExit() {
+ checkState(devMode, "only in dev mode");
+ new Timer("webapp exit", true).schedule(new TimerTask() {
+ @Override public void run() {
+ LOG.info("WebApp /{} exiting...", webApp.name());
+ webApp.stop();
+ System.exit(0); // FINDBUG: this is intended in dev mode
+ }
+ }, 18); // enough time for the last local request to complete
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/MimeType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/MimeType.java
new file mode 100644
index 0000000..5f82000
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/MimeType.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+public interface MimeType {
+
+ public static final String TEXT = "text/plain; charset=UTF-8";
+ public static final String HTML = "text/html; charset=UTF-8";
+ public static final String XML = "text/xml; charset=UTF-8";
+ public static final String HTTP = "message/http; charset=UTF-8";
+ public static final String JSON = "application/json; charset=UTF-8";
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Params.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Params.java
new file mode 100644
index 0000000..f9768f7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Params.java
@@ -0,0 +1,31 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+/**
+ * Public static constants for webapp parameters. Do NOT put any
+ * private or application specific constants here as they're part of
+ * the API for users of the controllers and views.
+ */
+public interface Params {
+ static final String TITLE = "title";
+ static final String TITLE_LINK = "title.href";
+ static final String USER = "user";
+ static final String ERROR_DETAILS = "error.details";
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
new file mode 100644
index 0000000..144a392
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
@@ -0,0 +1,87 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import com.google.common.collect.Lists;
+import com.google.inject.servlet.RequestScoped;
+
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * A class to help pass around request-scoped info.
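+ * Items are typically added by chaining (values below are illustrative):
+ * <pre>
+ *   ResponseInfo info = ResponseInfo.$about("Job")
+ *       ._("State", "RUNNING")
+ *       ._("Tracking URL", "http://host:port/track", "track");
+ * </pre>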
+ */
+@RequestScoped
+public class ResponseInfo implements Iterable<ResponseInfo.Item> {
+
+ public static class Item {
+ public final String key;
+ public final String url;
+ public final Object value;
+
+ Item(String key, String url, Object value) {
+ this.key = key;
+ this.url = url;
+ this.value = value;
+ }
+
+ public static Item of(String key, Object value) {
+ return new Item(key, null, value);
+ }
+
+ public static Item of(String key, String url, Object value) {
+ return new Item(key, url, value);
+ }
+ }
+
+ final List<Item> items = Lists.newArrayList();
+ String about = "Info";
+
+ // Do NOT add any constructors here, unless...
+
+ public static ResponseInfo $about(String about) {
+ ResponseInfo info = new ResponseInfo();
+ info.about = about;
+ return info;
+ }
+
+ public ResponseInfo about(String about) {
+ this.about = about;
+ return this;
+ }
+
+ public String about() {
+ return about;
+ }
+
+ public ResponseInfo _(String key, Object value) {
+ items.add(Item.of(key, value));
+ return this;
+ }
+
+ public ResponseInfo _(String key, String url, Object anchor) {
+ items.add(Item.of(key, url, anchor));
+ return this;
+ }
+
+ @Override
+ public Iterator<Item> iterator() {
+ return items.iterator();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java
new file mode 100644
index 0000000..1f7c8d6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java
@@ -0,0 +1,286 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import com.google.common.base.CharMatcher;
+import static com.google.common.base.Preconditions.*;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang.StringUtils;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Manages path info to controller#action routing.
+ */
+class Router {
+ static final Logger LOG = LoggerFactory.getLogger(Router.class);
+ static final ImmutableList<String> EMPTY_LIST = ImmutableList.of();
+ static final CharMatcher SLASH = CharMatcher.is('/');
+ static final Pattern controllerRe =
+ Pattern.compile("^/[A-Za-z_]\\w*(?:/.*)?");
+
+ static class Dest {
+ final String prefix;
+ final ImmutableList<String> pathParams;
+ final Method action;
+ final Class<? extends Controller> controllerClass;
+ Class<? extends View> defaultViewClass;
+ final EnumSet<WebApp.HTTP> methods;
+
+ Dest(String path, Method method, Class<? extends Controller> cls,
+ List<String> pathParams, WebApp.HTTP httpMethod) {
+ prefix = checkNotNull(path);
+ action = checkNotNull(method);
+ controllerClass = checkNotNull(cls);
+ this.pathParams = pathParams != null ? ImmutableList.copyOf(pathParams)
+ : EMPTY_LIST;
+ methods = EnumSet.of(httpMethod);
+ }
+ }
+
+ Class<?> hostClass; // starting point to look for default classes
+
+ final TreeMap<String, Dest> routes = Maps.newTreeMap(); // path->dest
+
+ /**
+ * Add a route to the router.
+ * e.g., add(GET, "/foo/show", FooController.class, "show", [name...]);
+ * The name list is from /foo/show/:name/...
+ */
+ synchronized Dest add(WebApp.HTTP httpMethod, String path,
+ Class<? extends Controller> cls,
+ String action, List<String> names) {
+ LOG.debug("adding {}({})->{}#{}", new Object[]{path, names, cls, action});
+ Dest dest = addController(httpMethod, path, cls, action, names);
+ addDefaultView(dest);
+ return dest;
+ }
+
+ private Dest addController(WebApp.HTTP httpMethod, String path,
+ Class<? extends Controller> cls,
+ String action, List<String> names) {
+ for (Method method : cls.getDeclaredMethods()) {
+ if (method.getName().equals(action) &&
+ method.getParameterTypes().length == 0 &&
+ Modifier.isPublic(method.getModifiers())) {
+ // TODO: deal with parameters using the names
+ Dest dest = routes.get(path);
+ if (dest == null) {
+ method.setAccessible(true); // avoid any runtime checks
+ dest = new Dest(path, method, cls, names, httpMethod);
+ routes.put(path, dest);
+ return dest;
+ }
+ dest.methods.add(httpMethod);
+ return dest;
+ }
+ }
+ throw new WebAppException(action + "() not found in " + cls);
+ }
+
+ private void addDefaultView(Dest dest) {
+ String controllerName = dest.controllerClass.getSimpleName();
+ if (controllerName.endsWith("Controller")) {
+ controllerName = controllerName.substring(0,
+ controllerName.length() - 10);
+ }
+ dest.defaultViewClass = find(View.class,
+ dest.controllerClass.getPackage().getName(),
+ join(controllerName + "View"));
+ }
+
+ void setHostClass(Class<?> cls) {
+ hostClass = cls;
+ }
+
+ /**
+ * Resolve a path to a destination.
+ */
+ synchronized Dest resolve(String httpMethod, String path) {
+ WebApp.HTTP method = WebApp.HTTP.valueOf(httpMethod); // can throw
+ Dest dest = lookupRoute(method, path);
+ if (dest == null) {
+ return resolveDefault(method, path);
+ }
+ return dest;
+ }
+
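+ // Walks the sorted route map from the exact path toward shorter prefixes
+ // (via lowerEntry) until a matching destination is found or the root "/"
+ // is exhausted.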
+ private Dest lookupRoute(WebApp.HTTP method, String path) {
+ String key = path;
+ do {
+ Dest dest = routes.get(key);
+ if (dest != null && methodAllowed(method, dest)) {
+ if ((Object)key == path) { // shut up warnings
+ LOG.debug("exact match for {}: {}", key, dest.action);
+ return dest;
+ } else if (isGoodMatch(dest, path)) {
+ LOG.debug("prefix match2 for {}: {}", key, dest.action);
+ return dest;
+ }
+ return resolveAction(method, dest, path);
+ }
+ Map.Entry<String, Dest> lower = routes.lowerEntry(key);
+ if (lower == null) {
+ return null;
+ }
+ dest = lower.getValue();
+ if (prefixMatches(dest, path)) {
+ if (methodAllowed(method, dest)) {
+ if (isGoodMatch(dest, path)) {
+ LOG.debug("prefix match for {}: {}", lower.getKey(), dest.action);
+ return dest;
+ }
+ return resolveAction(method, dest, path);
+ }
+ // check other candidates
+ int slashPos = key.lastIndexOf('/');
+ key = slashPos > 0 ? path.substring(0, slashPos) : "/";
+ } else {
+ key = "/";
+ }
+ } while (true);
+ }
+
+ static boolean methodAllowed(WebApp.HTTP method, Dest dest) {
+ // Accept all methods by default, unless explicitly configured otherwise.
+ return dest.methods.contains(method) || (dest.methods.size() == 1 &&
+ dest.methods.contains(WebApp.HTTP.GET));
+ }
+
+ static boolean prefixMatches(Dest dest, String path) {
+ LOG.debug("checking prefix {}{} for path: {}", new Object[]{dest.prefix,
+ dest.pathParams, path});
+ if (!path.startsWith(dest.prefix)) {
+ return false;
+ }
+ int prefixLen = dest.prefix.length();
+ if (prefixLen > 1 && path.length() > prefixLen &&
+ path.charAt(prefixLen) != '/') {
+ return false;
+ }
+ // prefix is / or prefix is path or prefix/...
+ return true;
+ }
+
+ static boolean isGoodMatch(Dest dest, String path) {
+ if (SLASH.countIn(dest.prefix) > 1) {
+ return true;
+ }
+ // We want to match (/foo, :a) for /foo/bar/blah and (/, :a) for /123
+ // but NOT / for /foo or (/, :a) for /foo or /foo/ because default route
+ // (FooController#index) for /foo and /foo/ takes precedence.
+ if (dest.prefix.length() == 1) {
+ return dest.pathParams.size() > 0 && !maybeController(path);
+ }
+ return dest.pathParams.size() > 0 || // /foo should match /foo/
+ (path.endsWith("/") && SLASH.countIn(path) == 2);
+ }
+
+ static boolean maybeController(String path) {
+ return controllerRe.matcher(path).matches();
+ }
+
+ // Assume /controller/action style path
+ private Dest resolveDefault(WebApp.HTTP method, String path) {
+ List<String> parts = WebApp.parseRoute(path);
+ String controller = parts.get(WebApp.R_CONTROLLER);
+ String action = parts.get(WebApp.R_ACTION);
+ // NameController is the encouraged default
+ Class<? extends Controller> cls = find(Controller.class,
+ join(controller, "Controller"));
+ if (cls == null) {
+ cls = find(Controller.class, controller);
+ }
+ if (cls == null) {
+ throw new WebAppException(join(path, ": controller for ", controller,
+ " not found"));
+ }
+ return add(method, defaultPrefix(controller, action), cls, action, null);
+ }
+
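+ // Illustrative mappings (derived from the code below):
+ //   ("default", "index") -> "/"
+ //   ("jobs", "index")    -> "/jobs"
+ //   ("jobs", "show")     -> "/jobs/show"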
+ private String defaultPrefix(String controller, String action) {
+ if (controller.equals("default") && action.equals("index")) {
+ return "/";
+ }
+ if (action.equals("index")) {
+ return join('/', controller);
+ }
+ return pjoin("", controller, action);
+ }
+
+ private <T> Class<? extends T> find(Class<T> cls, String cname) {
+ String pkg = hostClass.getPackage().getName();
+ return find(cls, pkg, cname);
+ }
+
+ private <T> Class<? extends T> find(Class<T> cls, String pkg, String cname) {
+ String name = StringUtils.capitalize(cname);
+ Class<? extends T> found = load(cls, djoin(pkg, name));
+ if (found == null) {
+ found = load(cls, djoin(pkg, "webapp", name));
+ }
+ if (found == null) {
+ found = load(cls, join(hostClass.getName(), '$', name));
+ }
+ return found;
+ }
+
+ @SuppressWarnings("unchecked")
+ private <T> Class<? extends T> load(Class<T> cls, String className) {
+ LOG.debug("trying: {}", className);
+ try {
+ Class<?> found = Class.forName(className);
+ if (cls.isAssignableFrom(found)) {
+ LOG.debug("found {}", className);
+ return (Class<? extends T>) found;
+ }
+ LOG.warn("found a {} but it's not a {}", className, cls.getName());
+ } catch (ClassNotFoundException e) {
+ // OK in this case.
+ }
+ return null;
+ }
+
+ // Dest may contain a candidate controller
+ private Dest resolveAction(WebApp.HTTP method, Dest dest, String path) {
+ if (dest.prefix.length() == 1) {
+ return null;
+ }
+ checkState(!isGoodMatch(dest, path), dest.prefix);
+ checkState(SLASH.countIn(path) > 1, path);
+ List<String> parts = WebApp.parseRoute(path);
+ String controller = parts.get(WebApp.R_CONTROLLER);
+ String action = parts.get(WebApp.R_ACTION);
+ return add(method, pjoin("", controller, action), dest.controllerClass,
+ action, null);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/SubView.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/SubView.java
new file mode 100644
index 0000000..f6b48bc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/SubView.java
@@ -0,0 +1,29 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+/**
+ * Interface for SubView to avoid top-level inclusion
+ */
+public interface SubView {
+ /**
+ * Render the sub-view.
+ */
+ void renderPartial();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ToJSON.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ToJSON.java
new file mode 100644
index 0000000..1204c4a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ToJSON.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import java.io.PrintWriter;
+
+/**
+ * A light-weight JSON rendering interface
+ */
+public interface ToJSON {
+ void toJSON(PrintWriter out);
+}
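
A minimal sketch of an implementation, streaming the JSON straight to the writer (the CounterInfo name and fields are assumptions for illustration only):

    import java.io.PrintWriter;
    import org.apache.hadoop.yarn.webapp.ToJSON;

    public class CounterInfo implements ToJSON {
      private final String name;
      private final long value;

      public CounterInfo(String name, long value) {
        this.name = name;
        this.value = value;
      }

      @Override public void toJSON(PrintWriter out) {
        // Light-weight by design: write directly, no intermediate tree.
        out.append("{\"name\":\"").append(name).append("\",\"value\":")
           .append(String.valueOf(value)).append('}');
      }
    }
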
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
new file mode 100644
index 0000000..acaee87
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
@@ -0,0 +1,215 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.servlet.RequestScoped;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Map;
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base class for all views
+ */
+public abstract class View implements Params {
+ public static final Logger LOG = LoggerFactory.getLogger(View.class);
+
+ @RequestScoped
+ public static class ViewContext {
+ final Controller.RequestContext rc;
+ int nestLevel = 0;
+ boolean wasInline;
+
+ @Inject ViewContext(Controller.RequestContext ctx) {
+ rc = ctx;
+ }
+
+ public int nestLevel() { return nestLevel; }
+ public boolean wasInline() { return wasInline; }
+
+ public void set(int nestLevel, boolean wasInline) {
+ this.nestLevel = nestLevel;
+ this.wasInline = wasInline;
+ }
+
+ public Controller.RequestContext requestContext() { return rc; }
+ }
+
+ private ViewContext vc;
+ @Inject Injector injector;
+
+ public View() {
+ // Makes injection in subclasses optional.
+    // Time will tell if this buys us more than the NPEs :)
+ }
+
+ public View(ViewContext ctx) {
+ vc = ctx;
+ }
+
+ /**
+ * The API to render the view
+ */
+ public abstract void render();
+
+ public ViewContext context() {
+ if (vc == null) {
+ if (injector == null) {
+ // One downside of making the injection in subclasses optional
+ throw new WebAppException(join("Error accessing ViewContext from a\n",
+          "child constructor; either move the use of the View methods\n",
+ "out of the constructor or inject the ViewContext into the\n",
+ "constructor"));
+ }
+ vc = injector.getInstance(ViewContext.class);
+ }
+ return vc;
+ }
+
+ public Throwable error() { return context().rc.error; }
+
+ public int status() { return context().rc.status; }
+
+ public boolean inDevMode() { return context().rc.devMode; }
+
+ public Injector injector() { return context().rc.injector; }
+
+ public <T> T getInstance(Class<T> cls) {
+ return injector().getInstance(cls);
+ }
+
+ public HttpServletRequest request() {
+ return context().rc.request;
+ }
+
+ public HttpServletResponse response() {
+ return context().rc.response;
+ }
+
+ public Map<String, String> moreParams() {
+ return context().rc.moreParams();
+ }
+
+ /**
+ * Get the cookies
+ * @return the cookies map
+ */
+ public Map<String, Cookie> cookies() {
+ return context().rc.cookies();
+ }
+
+ public ServletOutputStream outputStream() {
+ try {
+ return response().getOutputStream();
+ } catch (IOException e) {
+ throw new WebAppException(e);
+ }
+ }
+
+ public PrintWriter writer() {
+ try {
+ return response().getWriter();
+ } catch (IOException e) {
+ throw new WebAppException(e);
+ }
+ }
+
+ /**
+ * Lookup a value from the current context.
+ * @param key to lookup
+ * @param defaultValue if key is missing
+ * @return the value of the key or the default value
+ */
+ public String $(String key, String defaultValue) {
+ // moreParams take precedence
+ String value = moreParams().get(key);
+ if (value == null) {
+ value = request().getParameter(key);
+ }
+ return value == null ? defaultValue : value;
+ }
+
+ /**
+ * Lookup a value from the current context
+ * @param key to lookup
+ * @return the value of the key or empty string
+ */
+ public String $(String key) {
+ return $(key, "");
+ }
+
+ /**
+ * Set a context value. (e.g. UI properties for sub views.)
+ * Try to avoid any application (vs view/ui) logic.
+ * @param key to set
+ * @param value to set
+ */
+ public void set(String key, String value) {
+ moreParams().put(key, value);
+ }
+
+ public String prefix() {
+ return context().rc.prefix;
+ }
+
+ public void setTitle(String title) {
+ set(TITLE, title);
+ }
+
+ public void setTitle(String title, String url) {
+ setTitle(title);
+ set(TITLE_LINK, url);
+ }
+
+ /**
+   * Create a URL from URL components
+   * @param parts components to join
+   * @return a URL string
+ */
+ public String url(String... parts) {
+ return ujoin(prefix(), parts);
+ }
+
+ public ResponseInfo info(String about) {
+ return getInstance(ResponseInfo.class).about(about);
+ }
+
+ /**
+ * Render a sub-view
+ * @param cls the class of the sub-view
+ */
+ public void render(Class<? extends SubView> cls) {
+ int saved = context().nestLevel;
+ getInstance(cls).renderPartial();
+ if (context().nestLevel != saved) {
+ throw new WebAppException("View "+ cls.getSimpleName() +" not complete");
+ }
+ }
+}
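
To make the context helpers concrete, a minimal sketch of a view that reads a request parameter via $() with a default, sets the page title for enclosing views, and writes to the response (GreetingView and the "user" key are assumptions):

    import java.io.PrintWriter;
    import org.apache.hadoop.yarn.webapp.View;

    public class GreetingView extends View {
      @Override public void render() {
        // $() consults moreParams() first, then the request parameters,
        // and falls back to the supplied default.
        String user = $("user", "anonymous");
        setTitle("Greetings, " + user);
        PrintWriter out = writer();   // wraps response().getWriter()
        out.println("Hello, " + user + "!");
      }
    }
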
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
new file mode 100644
index 0000000..b9afe81
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
@@ -0,0 +1,202 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import com.google.common.base.CharMatcher;
+import static com.google.common.base.Preconditions.*;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+import com.google.inject.Provides;
+import com.google.inject.servlet.GuiceFilter;
+import com.google.inject.servlet.ServletModule;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.yarn.util.StringHelper;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * @see WebApps for a usage example
+ */
+public abstract class WebApp extends ServletModule {
+ private static final Logger LOG = LoggerFactory.getLogger(WebApp.class);
+
+ public enum HTTP { GET, POST, HEAD, PUT, DELETE };
+
+ private volatile String name;
+ private volatile Configuration conf;
+ private volatile HttpServer httpServer;
+ private volatile GuiceFilter guiceFilter;
+ private final Router router = new Router();
+
+ // index for the parsed route result
+ static final int R_PATH = 0;
+ static final int R_CONTROLLER = 1;
+ static final int R_ACTION = 2;
+ static final int R_PARAMS = 3;
+
+ static final Splitter pathSplitter =
+ Splitter.on('/').trimResults().omitEmptyStrings();
+
+ void setHttpServer(HttpServer server) {
+ httpServer = checkNotNull(server, "http server");
+ }
+
+ @Provides public HttpServer httpServer() { return httpServer; }
+
+ public int port() {
+ return checkNotNull(httpServer, "httpServer").getPort();
+ }
+
+ public void stop() {
+ try {
+ checkNotNull(httpServer, "httpServer").stop();
+ checkNotNull(guiceFilter, "guiceFilter").destroy();
+ }
+ catch (Exception e) {
+ throw new WebAppException(e);
+ }
+ }
+
+ public void joinThread() {
+ try {
+ checkNotNull(httpServer, "httpServer").join();
+ } catch (InterruptedException e) {
+ LOG.info("interrupted", e);
+ }
+ }
+
+ void setConf(Configuration conf) { this.conf = conf; }
+
+ @Provides public Configuration conf() { return conf; }
+
+ @Provides Router router() { return router; }
+
+ @Provides WebApp webApp() { return this; }
+
+ void setName(String name) { this.name = name; }
+
+ public String name() { return this.name; }
+
+ void setHostClass(Class<?> cls) {
+ router.setHostClass(cls);
+ }
+
+ void setGuiceFilter(GuiceFilter instance) {
+ guiceFilter = instance;
+ }
+
+ @Override
+ public void configureServlets() {
+ setup();
+ serve("/", "/__stop", StringHelper.join('/', name, '*')).with(Dispatcher.class);
+ }
+
+ /**
+   * Set up a webapp serving route.
+ * @param method the http method for the route
+ * @param pathSpec the path spec in the form of /controller/action/:args etc.
+ * @param cls the controller class
+ * @param action the controller method
+ */
+ public void route(HTTP method, String pathSpec,
+ Class<? extends Controller> cls, String action) {
+ List<String> res = parseRoute(pathSpec);
+ router.add(method, res.get(R_PATH), cls, action,
+ res.subList(R_PARAMS, res.size()));
+ }
+
+ public void route(String pathSpec, Class<? extends Controller> cls,
+ String action) {
+ route(HTTP.GET, pathSpec, cls, action);
+ }
+
+ public void route(String pathSpec, Class<? extends Controller> cls) {
+ List<String> res = parseRoute(pathSpec);
+ router.add(HTTP.GET, res.get(R_PATH), cls, res.get(R_ACTION),
+ res.subList(R_PARAMS, res.size()));
+ }
+
+
+ /**
+ * /controller/action/:args => [/controller/action, controller, action, args]
+ * /controller/:args => [/controller, controller, index, args]
+ */
+ static List<String> parseRoute(String pathSpec) {
+ List<String> result = Lists.newArrayList();
+ result.add(getPrefix(checkNotNull(pathSpec, "pathSpec")));
+ Iterable<String> parts = pathSplitter.split(pathSpec);
+ String controller = null, action = null;
+ for (String s : parts) {
+ if (controller == null) {
+ if (s.charAt(0) == ':') {
+ controller = "default";
+ result.add(controller);
+ action = "index";
+ result.add(action);
+ } else {
+ controller = s;
+ }
+ } else if (action == null) {
+ if (s.charAt(0) == ':') {
+ action = "index";
+ result.add(action);
+ } else {
+ action = s;
+ }
+ }
+ result.add(s);
+ }
+ if (controller == null) {
+ result.add("default");
+ }
+ if (action == null) {
+ result.add("index");
+ }
+ return result;
+ }
+
+ static String getPrefix(String pathSpec) {
+ int start = 0;
+ while (CharMatcher.WHITESPACE.matches(pathSpec.charAt(start))) {
+ ++start;
+ }
+ if (pathSpec.charAt(start) != '/') {
+ throw new WebAppException("Path spec syntax error: "+ pathSpec);
+ }
+ int ci = pathSpec.indexOf(':');
+ if (ci == -1) {
+ ci = pathSpec.length();
+ }
+ if (ci == 1) {
+ return "/";
+ }
+ char c;
+ do {
+ c = pathSpec.charAt(--ci);
+ } while (c == '/' || CharMatcher.WHITESPACE.matches(c));
+ return pathSpec.substring(start, ci + 1);
+ }
+
+ public abstract void setup();
+}
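
A minimal sketch of a concrete WebApp wiring routes in setup(); per the parseRoute() Javadoc above, "/jobs/:id" parses to [/jobs, jobs, index, :id], so the explicit action argument selects show() instead of the default index() (JobsWebApp and JobsController are hypothetical):

    import org.apache.hadoop.yarn.webapp.Controller;
    import org.apache.hadoop.yarn.webapp.WebApp;

    public class JobsWebApp extends WebApp {
      public static class JobsController extends Controller {
        @Override public void index() { renderText("all jobs"); }
        public void show() { renderText("one job"); }
      }

      @Override public void setup() {
        route("/jobs", JobsController.class);             // GET -> index()
        route("/jobs/:id", JobsController.class, "show"); // GET -> show()
      }
    }
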
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebAppException.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebAppException.java
new file mode 100644
index 0000000..09b8bdd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebAppException.java
@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import org.apache.hadoop.yarn.YarnException;
+
+public class WebAppException extends YarnException {
+
+ private static final long serialVersionUID = 1L;
+
+ public WebAppException(String msg) {
+ super(msg);
+ }
+
+ public WebAppException(Throwable cause) {
+ super(cause);
+ }
+
+ public WebAppException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
new file mode 100644
index 0000000..55f9010
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -0,0 +1,221 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import static com.google.common.base.Preconditions.*;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.servlet.GuiceFilter;
+
+import java.net.ConnectException;
+import java.net.URL;
+import org.apache.commons.lang.StringUtils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpServer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Helpers to create an embedded webapp.
+ *
+ * <h4>Quick start:</h4>
+ * <pre>
+ * WebApp wa = WebApps.$for(myApp).start();</pre>
+ * Starts a webapp with default routes, bound to 0.0.0.0 (all network interfaces)
+ * on an ephemeral port, which can be obtained with:<pre>
+ * int port = wa.port();</pre>
+ * <h4>With more options:</h4>
+ * <pre>
+ * WebApp wa = WebApps.$for(myApp).at(address, port).
+ * with(configuration).
+ * start(new WebApp() {
+ * @Override public void setup() {
+ * route("/foo/action", FooController.class);
+ * route("/foo/:id", FooController.class, "show");
+ * }
+ * });</pre>
+ */
+public class WebApps {
+ static final Logger LOG = LoggerFactory.getLogger(WebApps.class);
+
+ public static class Builder<T> {
+ final String name;
+ final Class<T> api;
+ final T application;
+ String bindAddress = "0.0.0.0";
+ int port = 0;
+ boolean findPort = false;
+ Configuration conf;
+ boolean devMode = false;
+ Module[] modules;
+
+ Builder(String name, Class<T> api, T application) {
+ this.name = name;
+ this.api = api;
+ this.application = application;
+ }
+
+ public Builder<T> at(String bindAddress) {
+ String[] parts = StringUtils.split(bindAddress, ':');
+ if (parts.length == 2) {
+ return at(parts[0], Integer.parseInt(parts[1]), true);
+ }
+ return at(bindAddress, 0, true);
+ }
+
+ public Builder<T> at(int port) {
+ return at("0.0.0.0", port, false);
+ }
+
+ public Builder<T> at(String address, int port, boolean findPort) {
+ this.bindAddress = checkNotNull(address, "bind address");
+ this.port = port;
+ this.findPort = findPort;
+ return this;
+ }
+
+ public Builder<T> with(Configuration conf) {
+ this.conf = conf;
+ return this;
+ }
+
+ public Builder<T> with(Module... modules) {
+ this.modules = modules; // OK
+ return this;
+ }
+
+ public Builder<T> inDevMode() {
+ devMode = true;
+ return this;
+ }
+
+ public WebApp start(WebApp webapp) {
+ if (webapp == null) {
+ webapp = new WebApp() {
+ @Override
+ public void setup() {
+ // Defaults should be fine in usual cases
+ }
+ };
+ }
+ webapp.setName(name);
+ if (conf == null) {
+ conf = new Configuration();
+ }
+ try {
+ if (application != null) {
+ webapp.setHostClass(application.getClass());
+ } else {
+ String cls = inferHostClass();
+ LOG.debug("setting webapp host class to {}", cls);
+ webapp.setHostClass(Class.forName(cls));
+ }
+ if (devMode) {
+ if (port > 0) {
+ try {
+ new URL("http://localhost:"+ port +"/__stop").getContent();
+ LOG.info("stopping existing webapp instance");
+ Thread.sleep(100);
+ } catch (ConnectException e) {
+ LOG.info("no existing webapp instance found: {}", e.toString());
+ } catch (Exception e) {
+ // should not be fatal
+ LOG.warn("error stopping existing instance: {}", e.toString());
+ }
+ } else {
+ LOG.error("dev mode does NOT work with ephemeral port!");
+ System.exit(1);
+ }
+ }
+ HttpServer server =
+ new HttpServer(name, bindAddress, port, findPort, conf);
+ server.addGlobalFilter("guice", GuiceFilter.class.getName(), null);
+ webapp.setConf(conf);
+ webapp.setHttpServer(server);
+ server.start();
+ LOG.info("Web app /"+ name +" started at "+ server.getPort());
+ } catch (Exception e) {
+ throw new WebAppException("Error starting http server", e);
+ }
+ Injector injector = Guice.createInjector(webapp, new AbstractModule() {
+ @Override
+ protected void configure() {
+ if (api != null) {
+ bind(api).toInstance(application);
+ }
+ }
+ });
+ LOG.info("Registered webapp guice modules");
+ // save a guice filter instance for webapp stop (mostly for unit tests)
+ webapp.setGuiceFilter(injector.getInstance(GuiceFilter.class));
+ if (devMode) {
+ injector.getInstance(Dispatcher.class).setDevMode(devMode);
+ LOG.info("in dev mode!");
+ }
+ return webapp;
+ }
+
+ public WebApp start() {
+ return start(null);
+ }
+
+ private String inferHostClass() {
+ String thisClass = this.getClass().getName();
+ Throwable t = new Throwable();
+ for (StackTraceElement e : t.getStackTrace()) {
+ if (e.getClassName().equals(thisClass)) continue;
+ return e.getClassName();
+ }
+      LOG.warn("could not infer host class from the call stack", t);
+ return thisClass;
+ }
+ }
+
+ /**
+ * Create a new webapp builder.
+ * @see WebApps for a complete example
+ * @param <T> application (holding the embedded webapp) type
+ * @param prefix of the webapp
+ * @param api the api class for the application
+ * @param app the application instance
+ * @return a webapp builder
+ */
+ public static <T> Builder<T> $for(String prefix, Class<T> api, T app) {
+ return new Builder<T>(prefix, api, app);
+ }
+
+  // Shortcut, mostly for tests/demos
+ @SuppressWarnings("unchecked")
+ public static <T> Builder<T> $for(String prefix, T app) {
+ return $for(prefix, (Class<T>)app.getClass(), app);
+ }
+
+ // Ditto
+ public static <T> Builder<T> $for(T app) {
+ return $for("", app);
+ }
+
+ public static <T> Builder<T> $for(String prefix) {
+ return $for(prefix, null, null);
+ }
+}
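
Putting the builder together, a minimal start-and-serve sketch in the spirit of the class Javadoc (WebAppsDemo is a hypothetical host class):

    import org.apache.hadoop.yarn.webapp.WebApp;
    import org.apache.hadoop.yarn.webapp.WebApps;

    public class WebAppsDemo {
      public static void main(String[] args) {
        // Default routes on an ephemeral port; the host instance doubles
        // as the application, as in $for(String, T).
        WebApp wa = WebApps.$for("demo", new WebAppsDemo()).start();
        System.out.println("bound to port " + wa.port());
        wa.joinThread();   // block until the server stops
      }
    }
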
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java
new file mode 100644
index 0000000..b09a551
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java
@@ -0,0 +1,53 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.example;
+
+import org.apache.hadoop.yarn.webapp.Controller;
+import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.view.HtmlPage;
+
+/**
+ * The obligatory example. No xml/jsp/templates/config files! No
+ * proliferation of strange annotations either :)
+ *
+ * <p>3 in 1 example. Check results at
+ * <br>http://localhost:8888/hello and
+ * <br>http://localhost:8888/hello/html
+ * <br>http://localhost:8888/hello/json
+ */
+public class HelloWorld {
+ public static class Hello extends Controller {
+ @Override public void index() { renderText("Hello world!"); }
+ public void html() { setTitle("Hello world!"); }
+ public void json() { renderJSON("Hello world!"); }
+ }
+
+ public static class HelloView extends HtmlPage {
+ @Override protected void render(Page.HTML<_> html) {
+ html. // produces valid html 4.01 strict
+ title($("title")).
+ p("#hello-for-css").
+ _($("title"))._()._();
+ }
+ }
+
+ public static void main(String[] args) {
+ WebApps.$for(new HelloWorld()).at(8888).inDevMode().start().joinThread();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java
new file mode 100644
index 0000000..fbbf4f8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java
@@ -0,0 +1,75 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.example;
+
+import com.google.inject.Inject;
+
+import org.apache.hadoop.yarn.webapp.Controller;
+import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.view.HtmlPage;
+
+/**
+ * The embedded UI serves two pages at:
+ * <br>http://localhost:8888/my and
+ * <br>http://localhost:8888/my/anythingYouWant
+ */
+public class MyApp {
+
+ // This is an app API
+ public String anyAPI() { return "anything ☁, really!"; }
+
+  // Note this is static so it can live in any file.
+ public static class MyController extends Controller {
+ final MyApp app;
+
+ // The app injection is optional
+ @Inject MyController(MyApp app, RequestContext ctx) {
+ super(ctx);
+ this.app = app;
+ }
+
+ @Override
+ public void index() {
+ set("anything", "something ☯");
+ }
+
+ public void anythingYouWant() {
+ set("anything", app.anyAPI());
+ }
+ }
+
+ // Ditto
+ public static class MyView extends HtmlPage {
+ // You can inject the app in views if needed.
+ @Override
+ public void render(Page.HTML<_> html) {
+ html.
+ title("My App").
+ p("#content_id_for_css_styling").
+ _("You can have", $("anything"))._()._();
+      // Note: there is no _() (back to parent element) method at the root
+      // level, and the IDE provides instant feedback on which level you're
+      // on in the auto-completion drop-downs.
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ WebApps.$for(new MyApp()).at(8888).inDevMode().start().joinThread();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
new file mode 100644
index 0000000..553e0dd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
@@ -0,0 +1,30548 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+// Generated by HamletGen. Do NOT edit!
+package org.apache.hadoop.yarn.webapp.hamlet;
+import java.io.PrintWriter;
+import java.util.EnumSet;
+import static java.util.EnumSet.*;
+import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EOpt.*;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+
+public class Hamlet extends HamletImpl implements HamletSpec._Html {
+ public Hamlet(PrintWriter out, int nestLevel, boolean wasInline) {
+ super(out, nestLevel, wasInline);
+ }
+
+ static EnumSet<EOpt> opt(boolean endTag, boolean inline, boolean pre) {
+ EnumSet<EOpt> opts = of(ENDTAG);
+ if (!endTag) opts.remove(ENDTAG);
+ if (inline) opts.add(INLINE);
+ if (pre) opts.add(PRE);
+ return opts;
+ }
+
+ public class HTML<T extends _> extends EImp<T> implements HamletSpec.HTML {
+ public HTML(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public HTML<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public HTML<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public HEAD<HTML<T>> head() {
+ closeAttrs();
+ return head_(this, false);
+ }
+
+ @Override
+ public BODY<HTML<T>> body() {
+ closeAttrs();
+ return body_(this, false);
+ }
+
+ @Override
+ public BODY<HTML<T>> body(String selector) {
+ return setSelector(body(), selector);
+ }
+
+ @Override
+ public BASE<HTML<T>> base() {
+ closeAttrs();
+ return base_(this, false);
+ }
+
+ @Override
+ public HTML<T> base(String href) {
+ return base().$href(href)._();
+ }
+
+ @Override
+ public TITLE<HTML<T>> title() {
+ closeAttrs();
+ return title_(this, false);
+ }
+
+ @Override
+ public HTML<T> title(String cdata) {
+ return title()._(cdata)._();
+ }
+
+ @Override
+ public STYLE<HTML<T>> style() {
+ closeAttrs();
+ return style_(this, false);
+ }
+
+ @Override
+ public HTML<T> style(Object... lines) {
+ return style().$type("text/css")._(lines)._();
+ }
+
+ @Override
+ public LINK<HTML<T>> link() {
+ closeAttrs();
+ return link_(this, false);
+ }
+
+ @Override
+ public HTML<T> link(String href) {
+ return setLinkHref(link(), href)._();
+ }
+
+ @Override
+ public META<HTML<T>> meta() {
+ closeAttrs();
+ return meta_(this, false);
+ }
+
+ @Override
+ public HTML<T> meta(String name, String content) {
+ return meta().$name(name).$content(content)._();
+ }
+
+ @Override
+ public HTML<T> meta_http(String header, String content) {
+ return meta().$http_equiv(header).$content(content)._();
+ }
+
+ @Override
+ public SCRIPT<HTML<T>> script() {
+ closeAttrs();
+ return script_(this, false);
+ }
+
+ @Override
+ public HTML<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public OBJECT<HTML<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<HTML<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public TABLE<HTML<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<HTML<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public HTML<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<HTML<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<HTML<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<HTML<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public HTML<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<HTML<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public HTML<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<HTML<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<HTML<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<HTML<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<HTML<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<HTML<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<HTML<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public HTML<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<HTML<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public HTML<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public HTML<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<HTML<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public HTML<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<HTML<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public HTML<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public HTML<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<HTML<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public HTML<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public HTML<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<HTML<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public HTML<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public HTML<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<HTML<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public HTML<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public HTML<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<HTML<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<HTML<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<HTML<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<HTML<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<HTML<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<HTML<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<HTML<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<HTML<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<HTML<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<HTML<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public INS<HTML<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public HTML<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<HTML<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public HTML<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+ }
+
+ private <T extends _> HEAD<T> head_(T e, boolean inline) {
+ return new HEAD<T>("head", e, opt(true, inline, false)); }
+
+ private <T extends _> BODY<T> body_(T e, boolean inline) {
+ return new BODY<T>("body", e, opt(true, inline, false)); }
+
+ public class SCRIPT<T extends _> extends EImp<T> implements HamletSpec.SCRIPT {
+ public SCRIPT(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public SCRIPT<T> $type(String value) {
+ addAttr("type", value);
+ return this;
+ }
+
+ @Override
+ public SCRIPT<T> $src(String value) {
+ addAttr("src", value);
+ return this;
+ }
+
+ @Override
+ public SCRIPT<T> $charset(String value) {
+ addAttr("charset", value);
+ return this;
+ }
+
+ @Override
+ public SCRIPT<T> $defer(String value) {
+ addAttr("defer", value);
+ return this;
+ }
+
+ @Override
+ public SCRIPT<T> _(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+ }
+
+ public class STYLE<T extends _> extends EImp<T> implements HamletSpec.STYLE {
+ public STYLE(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public STYLE<T> $type(String value) {
+ addAttr("type", value);
+ return this;
+ }
+
+ @Override
+ public STYLE<T> $media(EnumSet<Media> value) {
+ addMediaAttr("media", value);
+ return this;
+ }
+
+ @Override
+ public STYLE<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public STYLE<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public STYLE<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public STYLE<T> _(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+ }
+
+ public class META<T extends _> extends EImp<T> implements HamletSpec.META {
+ public META(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public META<T> $http_equiv(String value) {
+ addAttr("http-equiv", value);
+ return this;
+ }
+
+ @Override
+ public META<T> $name(String value) {
+ addAttr("name", value);
+ return this;
+ }
+
+ @Override
+ public META<T> $content(String value) {
+ addAttr("content", value);
+ return this;
+ }
+
+ @Override
+ public META<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public META<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+ }
+
+ public class BASE<T extends _> extends EImp<T> implements HamletSpec.BASE {
+ public BASE(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public BASE<T> $href(String value) {
+ addAttr("href", value);
+ return this;
+ }
+ }
+
+ public class TITLE<T extends _> extends EImp<T> implements HamletSpec.TITLE {
+ public TITLE(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public TITLE<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public TITLE<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public TITLE<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public TITLE<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+ }
+
+ public class HEAD<T extends _> extends EImp<T> implements HamletSpec.HEAD {
+ public HEAD(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public HEAD<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public HEAD<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public BASE<HEAD<T>> base() {
+ closeAttrs();
+ return base_(this, false);
+ }
+
+ @Override
+ public HEAD<T> base(String href) {
+ return base().$href(href)._();
+ }
+
+ @Override
+ public TITLE<HEAD<T>> title() {
+ closeAttrs();
+ return title_(this, false);
+ }
+
+ @Override
+ public HEAD<T> title(String cdata) {
+ return title()._(cdata)._();
+ }
+
+ @Override
+ public STYLE<HEAD<T>> style() {
+ closeAttrs();
+ return style_(this, false);
+ }
+
+ @Override
+ public HEAD<T> style(Object... lines) {
+ return style().$type("text/css")._(lines)._();
+ }
+
+ @Override
+ public LINK<HEAD<T>> link() {
+ closeAttrs();
+ return link_(this, false);
+ }
+
+ @Override
+ public HEAD<T> link(String href) {
+ return setLinkHref(link(), href)._();
+ }
+
+ @Override
+ public META<HEAD<T>> meta() {
+ closeAttrs();
+ return meta_(this, false);
+ }
+
+ @Override
+ public HEAD<T> meta(String name, String content) {
+ return meta().$name(name).$content(content)._();
+ }
+
+ @Override
+ public HEAD<T> meta_http(String header, String content) {
+ return meta().$http_equiv(header).$content(content)._();
+ }
+
+ @Override
+ public SCRIPT<HEAD<T>> script() {
+ closeAttrs();
+ return script_(this, false);
+ }
+
+ @Override
+ public HEAD<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public OBJECT<HEAD<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<HEAD<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+ }
+
+ private <T extends _> BASE<T> base_(T e, boolean inline) {
+ return new BASE<T>("base", e, opt(false, inline, false)); }
+
+ private <T extends _> TITLE<T> title_(T e, boolean inline) {
+ return new TITLE<T>("title", e, opt(true, inline, false)); }
+
+ public class TD<T extends _> extends EImp<T> implements HamletSpec.TD {
+ public TD(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public TD<T> $headers(String value) {
+ addAttr("headers", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $scope(Scope value) {
+ addAttr("scope", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $rowspan(int value) {
+ addAttr("rowspan", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $colspan(int value) {
+ addAttr("colspan", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public TD<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<TD<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<TD<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public TD<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<TD<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<TD<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<TD<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public TD<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<TD<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public TD<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<TD<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<TD<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<TD<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<TD<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<TD<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<TD<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public TD<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<TD<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public TD<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public TD<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<TD<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public TD<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<TD<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public TD<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<TD<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public TD<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<TD<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public TD<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<TD<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public TD<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<TD<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<TD<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<TD<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<TD<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<TD<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<TD<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<TD<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<TD<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<TD<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<TD<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public TD<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public TD<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<TD<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public TD<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<TD<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public TD<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<TD<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public TD<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public TD<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<TD<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public TD<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<TD<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public TD<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<TD<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public TD<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<TD<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public TD<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public TD<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<TD<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public TD<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<TD<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public TD<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<TD<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public TD<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<TD<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public TD<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<TD<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public TD<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<TD<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<TD<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public TD<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public TD<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<TD<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public TD<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<TD<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<TD<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<TD<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public TD<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<TD<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public TD<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<TD<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<TD<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public TD<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<TD<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<TD<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public TD<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<TD<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public TD<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<TD<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public TD<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public TD<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<TD<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public TD<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<TD<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public TD<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<TD<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public TD<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<TD<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public TD<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<TD<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<TD<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<TD<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<TD<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<TD<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<TD<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public TD<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<TD<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<TD<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public TD<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
+ public class TH<T extends _> extends EImp<T> implements HamletSpec.TH {
+ public TH(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public TH<T> $headers(String value) {
+ addAttr("headers", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $scope(Scope value) {
+ addAttr("scope", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $rowspan(int value) {
+ addAttr("rowspan", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $colspan(int value) {
+ addAttr("colspan", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public TH<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<TH<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<TH<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public TH<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<TH<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<TH<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<TH<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public TH<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<TH<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public TH<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<TH<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<TH<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<TH<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<TH<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<TH<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<TH<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public TH<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<TH<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public TH<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public TH<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<TH<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public TH<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<TH<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public TH<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<TH<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public TH<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<TH<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public TH<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<TH<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public TH<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<TH<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<TH<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<TH<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<TH<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<TH<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<TH<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<TH<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<TH<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<TH<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<TH<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public TH<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public TH<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<TH<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public TH<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<TH<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public TH<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<TH<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public TH<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public TH<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<TH<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public TH<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<TH<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public TH<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<TH<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public TH<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<TH<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public TH<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public TH<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<TH<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public TH<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<TH<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public TH<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<TH<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public TH<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<TH<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public TH<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<TH<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public TH<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<TH<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<TH<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public TH<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public TH<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<TH<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public TH<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<TH<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<TH<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<TH<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public TH<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<TH<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public TH<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<TH<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<TH<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public TH<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<TH<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<TH<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public TH<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<TH<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public TH<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<TH<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public TH<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public TH<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<TH<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public TH<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<TH<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public TH<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<TH<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public TH<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<TH<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public TH<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<TH<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<TH<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<TH<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<TH<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<TH<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<TH<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public TH<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<TH<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<TH<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public TH<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
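+ /** Table row ({@code <tr>}); element implementation of HamletSpec.TR.
+  * The cdata overloads emit a complete cell in one call, e.g.
+  * td("text") is shorthand for td()._("text")._(). */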
+ public class TR<T extends _> extends EImp<T> implements HamletSpec.TR {
+ public TR(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public TR<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public TR<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TH<TR<T>> th() {
+ closeAttrs();
+ return th_(this, false);
+ }
+
+ @Override
+ public TR<T> th(String cdata) {
+ return th()._(cdata)._();
+ }
+
+ @Override
+ public TR<T> th(String selector, String cdata) {
+ return setSelector(th(), selector)._(cdata)._();
+ }
+
+ @Override
+ public TD<TR<T>> td() {
+ closeAttrs();
+ return td_(this, false);
+ }
+
+ @Override
+ public TR<T> td(String cdata) {
+ return td()._(cdata)._();
+ }
+
+ @Override
+ public TR<T> td(String selector, String cdata) {
+ return setSelector(td(), selector)._(cdata)._();
+ }
+ }
+
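+ // Factory methods for cell elements. The three opt flags appear to mean,
+ // in order: end tag required, inline rendering, preformatted content;
+ // th and td leave the end tag optional as HTML 4 permits.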
+ private <T extends _> TH<T> th_(T e, boolean inline) {
+ return new TH<T>("th", e, opt(false, inline, false)); }
+
+ private <T extends _> TD<T> td_(T e, boolean inline) {
+ return new TD<T>("td", e, opt(false, inline, false)); }
+
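+ /** Table column ({@code <col>}); element implementation of HamletSpec.COL.
+  * Attribute-only: $span plus the common attribute and event setters. */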
+ public class COL<T extends _> extends EImp<T> implements HamletSpec.COL {
+ public COL(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public COL<T> $span(int value) {
+ addAttr("span", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public COL<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+ }
+
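+ /** Column group ({@code <colgroup>}); element implementation of
+  * HamletSpec.COLGROUP. Accepts $span and nested col() children. */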
+ public class COLGROUP<T extends _> extends EImp<T> implements HamletSpec.COLGROUP {
+ public COLGROUP(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public COLGROUP<T> $span(int value) {
+ addAttr("span", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public COLGROUP<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public COL<COLGROUP<T>> col() {
+ closeAttrs();
+ return col_(this, false);
+ }
+
+ @Override
+ public COLGROUP<T> col(String selector) {
+ return setSelector(col(), selector)._();
+ }
+ }
+
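+ /** Table body section ({@code <tbody>}); rows are added via tr(). */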
+ public class TBODY<T extends _> extends EImp<T> implements HamletSpec.TBODY {
+ public TBODY(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public TBODY<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public TBODY<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TR<TBODY<T>> tr() {
+ closeAttrs();
+ return tr_(this, false);
+ }
+
+ @Override
+ public TR<TBODY<T>> tr(String selector) {
+ return setSelector(tr(), selector);
+ }
+ }
+
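+ /** Table footer section ({@code <tfoot>}); same row API as TBODY. */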
+ public class TFOOT<T extends _> extends EImp<T> implements HamletSpec.TFOOT {
+ public TFOOT(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public TFOOT<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public TFOOT<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TR<TFOOT<T>> tr() {
+ closeAttrs();
+ return tr_(this, false);
+ }
+
+ @Override
+ public TR<TFOOT<T>> tr(String selector) {
+ return setSelector(tr(), selector);
+ }
+ }
+
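+ /** Table header section ({@code <thead>}); same row API as TBODY. */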
+ public class THEAD<T extends _> extends EImp<T> implements HamletSpec.THEAD {
+ public THEAD(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public THEAD<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public THEAD<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TR<THEAD<T>> tr() {
+ closeAttrs();
+ return tr_(this, false);
+ }
+
+ @Override
+ public TR<THEAD<T>> tr(String selector) {
+ return setSelector(tr(), selector);
+ }
+ }
+
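+ /** Table caption ({@code <caption>}); element implementation of
+  * HamletSpec.CAPTION, restricted to inline content. */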
+ public class CAPTION<T extends _> extends EImp<T> implements HamletSpec.CAPTION {
+ public CAPTION(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public CAPTION<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public CAPTION<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<CAPTION<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<CAPTION<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<CAPTION<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<CAPTION<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<CAPTION<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<CAPTION<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<CAPTION<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<CAPTION<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<CAPTION<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<CAPTION<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<CAPTION<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<CAPTION<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<CAPTION<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<CAPTION<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public CAPTION<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public CAPTION<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<CAPTION<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<CAPTION<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<CAPTION<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<CAPTION<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<CAPTION<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<CAPTION<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<CAPTION<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public CAPTION<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<CAPTION<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<CAPTION<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<CAPTION<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<CAPTION<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<CAPTION<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<CAPTION<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<CAPTION<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<CAPTION<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<CAPTION<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<CAPTION<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<CAPTION<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<CAPTION<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<CAPTION<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<CAPTION<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public CAPTION<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<CAPTION<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<CAPTION<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public CAPTION<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
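+ /** Table element ({@code <table>}); element implementation of
+  * HamletSpec.TABLE. An illustrative fluent usage (sketch only; the
+  * selector "#jobs" and the cell values are hypothetical):
+  *
+  *   table("#jobs").
+  *     thead().tr().th("Id").th("State")._()._().
+  *     tbody().tr().td("job_1").td("RUNNING")._()._()._();
+  *
+  * setSelector maps "#id.class" shorthand onto $id/$class, and _()
+  * closes the current element, returning its parent. */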
+ public class TABLE<T extends _> extends EImp<T> implements HamletSpec.TABLE {
+ public TABLE(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public TABLE<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<T> caption(String cdata) {
+ return caption()._(cdata)._();
+ }
+
+ @Override
+ public CAPTION<TABLE<T>> caption() {
+ closeAttrs();
+ return caption_(this, false);
+ }
+
+ @Override
+ public COLGROUP<TABLE<T>> colgroup() {
+ closeAttrs();
+ return colgroup_(this, false);
+ }
+
+ @Override
+ public THEAD<TABLE<T>> thead(String selector) {
+ return setSelector(thead(), selector);
+ }
+
+ @Override
+ public THEAD<TABLE<T>> thead() {
+ closeAttrs();
+ return thead_(this, false);
+ }
+
+ @Override
+ public TFOOT<TABLE<T>> tfoot() {
+ closeAttrs();
+ return tfoot_(this, false);
+ }
+
+ @Override
+ public TFOOT<TABLE<T>> tfoot(String selector) {
+ return setSelector(tfoot(), selector);
+ }
+
+ @Override
+ public TBODY<TABLE<T>> tbody() {
+ closeAttrs();
+ return tbody_(this, false);
+ }
+
+ @Override
+ public TBODY<TABLE<T>> tbody(String selector) {
+ return setSelector(tbody(), selector);
+ }
+
+ @Override
+ public TR<TABLE<T>> tr() {
+ closeAttrs();
+ return tr_(this, false);
+ }
+
+ @Override
+ public TR<TABLE<T>> tr(String selector) {
+ return setSelector(tr(), selector);
+ }
+
+ @Override
+ public COL<TABLE<T>> col() {
+ closeAttrs();
+ return col_(this, false);
+ }
+
+ @Override
+ public TABLE<T> col(String selector) {
+ return setSelector(col(), selector)._();
+ }
+ }
+
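+ // Factory methods for the table sub-elements above. Note that caption_
+ // and tbody_ pass true for the first opt flag (explicit end tag) while
+ // the other sections leave it false.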
+ private <T extends _> CAPTION<T> caption_(T e, boolean inline) {
+ return new CAPTION<T>("caption", e, opt(true, inline, false)); }
+
+ private <T extends _> COLGROUP<T> colgroup_(T e, boolean inline) {
+ return new COLGROUP<T>("colgroup", e, opt(false, inline, false)); }
+
+ private <T extends _> THEAD<T> thead_(T e, boolean inline) {
+ return new THEAD<T>("thead", e, opt(false, inline, false)); }
+
+ private <T extends _> TFOOT<T> tfoot_(T e, boolean inline) {
+ return new TFOOT<T>("tfoot", e, opt(false, inline, false)); }
+
+ private <T extends _> TBODY<T> tbody_(T e, boolean inline) {
+ return new TBODY<T>("tbody", e, opt(true, inline, false)); }
+
+ private <T extends _> COL<T> col_(T e, boolean inline) {
+ return new COL<T>("col", e, opt(false, inline, false)); }
+
+ private <T extends _> TR<T> tr_(T e, boolean inline) {
+ return new TR<T>("tr", e, opt(false, inline, false)); }
+
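+ /** Button control ({@code <button>}); element implementation of
+  * HamletSpec.BUTTON. Form attributes ($type, $name, $value, $disabled,
+  * $tabindex, $accesskey, $onfocus, $onblur) precede the child-element
+  * methods; the common attributes follow them. */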
+ public class BUTTON<T extends _> extends EImp<T> implements HamletSpec.BUTTON {
+ public BUTTON(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public BUTTON<T> $type(ButtonType value) {
+ addAttr("type", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $name(String value) {
+ addAttr("name", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $value(String value) {
+ addAttr("value", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $disabled() {
+ addAttr("disabled", null);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $tabindex(int value) {
+ addAttr("tabindex", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $accesskey(String value) {
+ addAttr("accesskey", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onfocus(String value) {
+ addAttr("onfocus", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onblur(String value) {
+ addAttr("onblur", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<BUTTON<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<BUTTON<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public BUTTON<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<BUTTON<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<BUTTON<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<BUTTON<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public BUTTON<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<BUTTON<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public BUTTON<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<BUTTON<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<BUTTON<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<BUTTON<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<BUTTON<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<BUTTON<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<BUTTON<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BUTTON<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<BUTTON<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public BUTTON<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<BUTTON<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public BUTTON<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<BUTTON<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public BUTTON<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<BUTTON<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public BUTTON<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<BUTTON<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public BUTTON<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<BUTTON<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public BUTTON<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<BUTTON<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<BUTTON<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<BUTTON<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<BUTTON<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<BUTTON<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<BUTTON<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public BUTTON<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<BUTTON<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<BUTTON<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<BUTTON<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<BUTTON<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<BUTTON<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<BUTTON<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<BUTTON<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<BUTTON<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<BUTTON<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<BUTTON<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<BUTTON<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<BUTTON<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<BUTTON<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<BUTTON<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public BUTTON<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<BUTTON<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<BUTTON<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<BUTTON<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<BUTTON<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<BUTTON<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<BUTTON<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<BUTTON<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public IMG<BUTTON<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<BUTTON<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<BUTTON<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<BUTTON<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<BUTTON<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public BUTTON<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public BUTTON<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+ }
+
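+ /** Fieldset legend ({@code <legend>}); element implementation of
+  * HamletSpec.LEGEND, with inline content and $accesskey support. */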
+ public class LEGEND<T extends _> extends EImp<T> implements HamletSpec.LEGEND {
+ public LEGEND(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public LEGEND<T> $accesskey(String value) {
+ addAttr("accesskey", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public LEGEND<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<LEGEND<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<LEGEND<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<LEGEND<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<LEGEND<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<LEGEND<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<LEGEND<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<LEGEND<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<LEGEND<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<LEGEND<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<LEGEND<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<LEGEND<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<LEGEND<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<LEGEND<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<LEGEND<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public LEGEND<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public LEGEND<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<LEGEND<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<LEGEND<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<LEGEND<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<LEGEND<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<LEGEND<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<LEGEND<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<LEGEND<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public LEGEND<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<LEGEND<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<LEGEND<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<LEGEND<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<LEGEND<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public LEGEND<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<LEGEND<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<LEGEND<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<LEGEND<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<LEGEND<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<LEGEND<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<LEGEND<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<LEGEND<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<LEGEND<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<LEGEND<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<LEGEND<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public LEGEND<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<LEGEND<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<LEGEND<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public LEGEND<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
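+ /**
+  * Fluent builder for the HTML {@code <fieldset>} element. As with the other
+  * generated element classes in this file, each child-element method first
+  * closes any pending attributes and returns the child's builder, while the
+  * cdata convenience overloads emit the child inline and return this element.
+  */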
+ public class FIELDSET<T extends _> extends EImp<T> implements HamletSpec.FIELDSET {
+ public FIELDSET(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public FIELDSET<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public LEGEND<FIELDSET<T>> legend() {
+ closeAttrs();
+ return legend_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> legend(String cdata) {
+ return legend()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public FIELDSET<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public TABLE<FIELDSET<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<FIELDSET<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public FIELDSET<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<FIELDSET<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<FIELDSET<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<FIELDSET<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<FIELDSET<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<FIELDSET<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<FIELDSET<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<FIELDSET<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<FIELDSET<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<FIELDSET<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<FIELDSET<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<FIELDSET<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<FIELDSET<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<FIELDSET<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<FIELDSET<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<FIELDSET<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<FIELDSET<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public FIELDSET<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<FIELDSET<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<FIELDSET<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<FIELDSET<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<FIELDSET<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<FIELDSET<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<FIELDSET<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<FIELDSET<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<FIELDSET<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<FIELDSET<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<FIELDSET<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public B<FIELDSET<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<FIELDSET<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<FIELDSET<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<FIELDSET<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<FIELDSET<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<FIELDSET<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<FIELDSET<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<FIELDSET<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<FIELDSET<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<FIELDSET<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<FIELDSET<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<FIELDSET<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<FIELDSET<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<FIELDSET<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public FIELDSET<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public FIELDSET<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<FIELDSET<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<FIELDSET<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<FIELDSET<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<FIELDSET<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<FIELDSET<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<FIELDSET<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<FIELDSET<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public FIELDSET<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<FIELDSET<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<FIELDSET<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<FIELDSET<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<FIELDSET<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public FIELDSET<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<FIELDSET<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<FIELDSET<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<FIELDSET<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<FIELDSET<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<FIELDSET<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<FIELDSET<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<FIELDSET<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<FIELDSET<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<FIELDSET<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<FIELDSET<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public FIELDSET<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<FIELDSET<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<FIELDSET<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public FIELDSET<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
+ private <T extends _> LEGEND<T> legend_(T e, boolean inline) {
+ return new LEGEND<T>("legend", e, opt(true, inline, false));
+ }
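+
+ // Illustrative sketch of the fieldset/legend fluent API (not part of the
+ // generated code; the Method and InputType enum constants are assumed from
+ // HamletSpec):
+ //
+ //   form().$method(Method.post).$action("/submit")
+ //     .fieldset()
+ //       .legend("Options")
+ //       .label("name", "Name:")
+ //       .input("#name").$type(InputType.text).$name("name")._()
+ //     ._()
+ //   ._();
+ //
+ // Each _() closes the current element and returns its parent builder.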
+
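+ /**
+  * Fluent builder for {@code <textarea>}: attribute setters plus the _() and
+  * _r() text methods, since a textarea may only contain character data.
+  */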
+ public class TEXTAREA<T extends _> extends EImp<T> implements HamletSpec.TEXTAREA {
+ public TEXTAREA(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public TEXTAREA<T> $name(String value) {
+ addAttr("name", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $disabled() {
+ addAttr("disabled", null);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $tabindex(int value) {
+ addAttr("tabindex", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $accesskey(String value) {
+ addAttr("accesskey", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onfocus(String value) {
+ addAttr("onfocus", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onblur(String value) {
+ addAttr("onblur", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $rows(int value) {
+ addAttr("rows", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $cols(int value) {
+ addAttr("cols", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $readonly() {
+ addAttr("readonly", null);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onselect(String value) {
+ addAttr("onselect", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onchange(String value) {
+ addAttr("onchange", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public TEXTAREA<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+ }
+
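+ /**
+  * Fluent builder for {@code <option>}. Note that option_ below constructs it
+  * with opt(false, inline, false), consistent with the end tag being optional
+  * for this element in HTML.
+  */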
+ public class OPTION<T extends _> extends EImp<T> implements HamletSpec.OPTION {
+ public OPTION(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public OPTION<T> $value(String value) {
+ addAttr("value", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $disabled() {
+ addAttr("disabled", null);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $selected() {
+ addAttr("selected", null);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $label(String value) {
+ addAttr("label", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public OPTION<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+ }
+
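+ /**
+  * Fluent builder for {@code <optgroup>}; its only child-element methods are
+  * the option() builders.
+  */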
+ public class OPTGROUP<T extends _> extends EImp<T> implements HamletSpec.OPTGROUP {
+ public OPTGROUP(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public OPTGROUP<T> $disabled() {
+ addAttr("disabled", null);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $label(String value) {
+ addAttr("label", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<OPTGROUP<T>> option() {
+ closeAttrs();
+ return option_(this, false);
+ }
+
+ @Override
+ public OPTGROUP<T> option(String cdata) {
+ return option()._(cdata)._();
+ }
+ }
+
+ private <T extends _> OPTGROUP<T> optgroup_(T e, boolean inline) {
+ return new OPTGROUP<T>("optgroup", e, opt(true, inline, false));
+ }
+
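+ /**
+  * Fluent builder for {@code <select>}; children are limited to optgroup()
+  * and option() builders.
+  */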
+ public class SELECT<T extends _> extends EImp<T> implements HamletSpec.SELECT {
+ public SELECT(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public SELECT<T> $name(String value) {
+ addAttr("name", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $disabled() {
+ addAttr("disabled", null);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $tabindex(int value) {
+ addAttr("tabindex", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onfocus(String value) {
+ addAttr("onfocus", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onblur(String value) {
+ addAttr("onblur", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onchange(String value) {
+ addAttr("onchange", value);
+ return this;
+ }
+
+ @Override
+ public OPTGROUP<SELECT<T>> optgroup() {
+ closeAttrs();
+ return optgroup_(this, false);
+ }
+
+ @Override
+ public SELECT<T> $size(int value) {
+ addAttr("size", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $multiple() {
+ addAttr("multiple", null);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public SELECT<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public OPTION<SELECT<T>> option() {
+ closeAttrs();
+ return option_(this, false);
+ }
+
+ @Override
+ public SELECT<T> option(String cdata) {
+ return option()._(cdata)._();
+ }
+ }
+
+ private <T extends _> OPTION<T> option_(T e, boolean inline) {
+ return new OPTION<T>("option", e, opt(false, inline, false));
+ }
+
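+ /**
+  * Fluent builder for the void {@code <input>} element: attribute setters
+  * only, with no child-content methods.
+  */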
+ public class INPUT<T extends _> extends EImp<T> implements HamletSpec.INPUT {
+ public INPUT(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public INPUT<T> $type(InputType value) {
+ addAttr("type", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $src(String value) {
+ addAttr("src", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $name(String value) {
+ addAttr("name", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $value(String value) {
+ addAttr("value", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $disabled() {
+ addAttr("disabled", null);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $tabindex(int value) {
+ addAttr("tabindex", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $accesskey(String value) {
+ addAttr("accesskey", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onfocus(String value) {
+ addAttr("onfocus", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onblur(String value) {
+ addAttr("onblur", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $readonly() {
+ addAttr("readonly", null);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onselect(String value) {
+ addAttr("onselect", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onchange(String value) {
+ addAttr("onchange", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $size(String value) {
+ addAttr("size", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $checked() {
+ addAttr("checked", null);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $maxlength(int value) {
+ addAttr("maxlength", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $alt(String value) {
+ addAttr("alt", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $ismap() {
+ addAttr("ismap", null);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $accept(String value) {
+ addAttr("accept", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public INPUT<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+ }
+
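+ /**
+  * Fluent builder for {@code <label>}. The $for attribute setter links the
+  * label to a control id; the label(forId, cdata) overloads elsewhere in this
+  * file are sugar for label().$for(forId)._(cdata)._().
+  */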
+ public class LABEL<T extends _> extends EImp<T> implements HamletSpec.LABEL {
+ public LABEL(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public LABEL<T> $accesskey(String value) {
+ addAttr("accesskey", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onfocus(String value) {
+ addAttr("onfocus", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onblur(String value) {
+ addAttr("onblur", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $for(String value) {
+ addAttr("for", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public LABEL<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<LABEL<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public LABEL<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<LABEL<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public LABEL<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<LABEL<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public LABEL<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<LABEL<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public LABEL<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<LABEL<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public LABEL<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<LABEL<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public LABEL<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<LABEL<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public LABEL<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<LABEL<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public LABEL<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<LABEL<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public LABEL<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<LABEL<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public LABEL<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<LABEL<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public LABEL<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<LABEL<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public LABEL<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<LABEL<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<LABEL<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public LABEL<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public LABEL<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<LABEL<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public LABEL<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<LABEL<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<LABEL<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<LABEL<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public LABEL<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<LABEL<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public LABEL<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<LABEL<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<LABEL<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public LABEL<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<LABEL<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<LABEL<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public LABEL<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<LABEL<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public LABEL<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<LABEL<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public LABEL<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<LABEL<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public LABEL<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<LABEL<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public LABEL<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<LABEL<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public LABEL<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public INPUT<LABEL<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<LABEL<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<LABEL<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<LABEL<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<LABEL<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<LABEL<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public LABEL<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<LABEL<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<LABEL<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public LABEL<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
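+ /**
+  * Fluent builder for {@code <form>}. Only block-level children (table,
+  * fieldset, headings, lists, etc.) are exposed here; form controls are
+  * reached through those children, e.g. form().fieldset().input().
+  */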
+ public class FORM<T extends _> extends EImp<T> implements HamletSpec.FORM {
+ public FORM(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public FORM<T> $name(String value) {
+ addAttr("name", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $accept(String value) {
+ addAttr("accept", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $action(String value) {
+ addAttr("action", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $method(Method value) {
+ addAttr("method", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $enctype(String value) {
+ addAttr("enctype", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onsubmit(String value) {
+ addAttr("onsubmit", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onreset(String value) {
+ addAttr("onreset", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $accept_charset(String value) {
+ addAttr("accept-charset", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public FORM<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public SCRIPT<FORM<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public FORM<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public TABLE<FORM<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<FORM<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public FORM<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<FORM<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<FORM<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<FORM<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public FORM<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<FORM<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public FORM<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<FORM<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<FORM<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<FORM<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<FORM<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<FORM<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<FORM<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public FORM<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<FORM<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public FORM<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public FORM<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<FORM<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public FORM<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<FORM<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public FORM<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public FORM<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<FORM<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public FORM<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public FORM<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<FORM<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public FORM<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public FORM<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<FORM<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public FORM<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public FORM<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<FORM<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<FORM<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<FORM<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<FORM<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<FORM<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<FORM<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FIELDSET<FORM<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<FORM<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+ }
+
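+ /**
+  * Fluent builder for {@code <li>}; it accepts both block children (table,
+  * div, lists, form, fieldset) and inline content such as b(), i() and text
+  * via _().
+  */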
+ public class LI<T extends _> extends EImp<T> implements HamletSpec.LI {
+ public LI(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public LI<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public LI<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<LI<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<LI<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public LI<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<LI<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<LI<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<LI<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public LI<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<LI<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public LI<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<LI<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<LI<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<LI<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<LI<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<LI<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<LI<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public LI<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<LI<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public LI<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public LI<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<LI<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public LI<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<LI<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public LI<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<LI<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public LI<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<LI<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public LI<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<LI<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public LI<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<LI<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<LI<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<LI<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<LI<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<LI<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<LI<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<LI<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<LI<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<LI<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<LI<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public LI<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public LI<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<LI<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public LI<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<LI<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public LI<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<LI<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public LI<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public LI<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<LI<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public LI<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<LI<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public LI<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<LI<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public LI<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<LI<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public LI<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public LI<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<LI<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public LI<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<LI<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public LI<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<LI<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public LI<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<LI<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public LI<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<LI<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public LI<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<LI<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<LI<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public LI<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public LI<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<LI<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public LI<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<LI<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<LI<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<LI<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public LI<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<LI<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public LI<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<LI<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<LI<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public LI<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<LI<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<LI<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public LI<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<LI<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public LI<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<LI<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public LI<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public LI<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<LI<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public LI<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<LI<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public LI<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<LI<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public LI<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<LI<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public LI<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<LI<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<LI<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<LI<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<LI<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<LI<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<LI<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public LI<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<LI<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<LI<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public LI<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
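+ /**
+  * Builder for the HTML <ul> (unordered list) element, per HamletSpec.UL.
+  * li() opens a nested item; li(String) is shorthand that opens the item,
+  * writes the text, and closes it again, which is what makes chains like
+  * ul().li("one").li("two")._() read straight through.
+  */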
+ public class UL<T extends _> extends EImp<T> implements HamletSpec.UL {
+ public UL(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public UL<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public UL<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public LI<UL<T>> li() {
+ closeAttrs();
+ return li_(this, false);
+ }
+
+ @Override
+ public UL<T> li(String cdata) {
+ return li()._(cdata)._();
+ }
+ }
+
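+ /**
+  * Builder for the HTML <ol> (ordered list) element, per HamletSpec.OL.
+  * Identical surface to UL apart from the tag: attribute setters return
+  * this, and li()/li(String) nest the items.
+  */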
+ public class OL<T extends _> extends EImp<T> implements HamletSpec.OL {
+ public OL(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public OL<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public OL<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public LI<OL<T>> li() {
+ closeAttrs();
+ return li_(this, false);
+ }
+
+ @Override
+ public OL<T> li(String cdata) {
+ return li()._(cdata)._();
+ }
+ }
+
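+ /** Shared factory for LI builders: constructs the element and forwards
+  * the caller's inline rendering flag through opt(...); the other two
+  * option flags stay false here. */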
+ private <T extends _> LI<T> li_(T e, boolean inline) {
+ return new LI<T>("li", e, opt(false, inline, false));
+ }
+
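+ /**
+  * Builder for the HTML <dd> (definition description) element, per
+  * HamletSpec.DD. Like LI it accepts both block content (table, p, div,
+  * headings, lists, forms) and inline content (b, em, a, span, ...).
+  */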
+ public class DD<T extends _> extends EImp<T> implements HamletSpec.DD {
+ public DD(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public DD<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public DD<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<DD<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<DD<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public DD<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<DD<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<DD<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<DD<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public DD<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<DD<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public DD<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<DD<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<DD<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<DD<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<DD<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<DD<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<DD<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public DD<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<DD<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public DD<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DD<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<DD<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public DD<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<DD<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public DD<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<DD<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public DD<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<DD<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public DD<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<DD<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public DD<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<DD<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<DD<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<DD<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<DD<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<DD<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<DD<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<DD<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<DD<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<DD<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<DD<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public DD<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public DD<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<DD<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public DD<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<DD<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public DD<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<DD<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public DD<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DD<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<DD<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public DD<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<DD<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public DD<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<DD<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public DD<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<DD<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public DD<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DD<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<DD<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public DD<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<DD<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public DD<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<DD<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public DD<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<DD<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public DD<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<DD<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public DD<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<DD<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<DD<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public DD<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public DD<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<DD<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public DD<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<DD<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<DD<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<DD<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public DD<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<DD<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public DD<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<DD<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<DD<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public DD<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<DD<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<DD<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public DD<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<DD<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public DD<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<DD<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public DD<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public DD<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<DD<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public DD<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<DD<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public DD<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<DD<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public DD<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<DD<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public DD<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<DD<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<DD<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<DD<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<DD<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DD<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DD<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public DD<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<DD<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<DD<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public DD<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
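+ /**
+  * Builder for the HTML <dt> (definition term) element, per HamletSpec.DT.
+  * Unlike DD it exposes only inline content methods (text, b, em, a, ...),
+  * matching the HTML 4 content model for dt.
+  */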
+ public class DT<T extends _> extends EImp<T> implements HamletSpec.DT {
+ public DT(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public DT<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public DT<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public DT<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<DT<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public DT<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<DT<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public DT<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<DT<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public DT<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DT<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<DT<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public DT<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<DT<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public DT<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<DT<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public DT<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<DT<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public DT<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DT<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<DT<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public DT<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<DT<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public DT<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<DT<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public DT<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<DT<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public DT<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<DT<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public DT<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<DT<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<DT<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public DT<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public DT<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<DT<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public DT<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<DT<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<DT<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<DT<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public DT<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<DT<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public DT<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<DT<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<DT<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public DT<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<DT<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<DT<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public DT<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<DT<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public DT<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<DT<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public DT<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public DT<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<DT<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public DT<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<DT<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public DT<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<DT<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public DT<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<DT<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public DT<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<DT<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<DT<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<DT<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<DT<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DT<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DT<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public DT<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<DT<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<DT<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public DT<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
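+ /**
+  * Builder for the HTML <dl> (definition list) element, per HamletSpec.DL.
+  * dt(String) and dd(String) are open-write-close shorthands, so a sketch
+  * like dl().dt("term").dd("definition")._() renders
+  * <dl><dt>term</dt><dd>definition</dd></dl>.
+  */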
+ public class DL<T extends _> extends EImp<T> implements HamletSpec.DL {
+ public DL(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public DL<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public DL<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public DT<DL<T>> dt() {
+ closeAttrs();
+ return dt_(this, false);
+ }
+
+ @Override
+ public DL<T> dt(String cdata) {
+ return dt()._(cdata)._();
+ }
+
+ @Override
+ public DD<DL<T>> dd() {
+ closeAttrs();
+ return dd_(this, false);
+ }
+
+ @Override
+ public DL<T> dd(String cdata) {
+ return dd()._(cdata)._();
+ }
+ }
+
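+ /** Shared factories for DT/DD builders, following the same pattern as
+  * li_ above: construct the element and forward the inline flag. */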
+ private <T extends _> DT<T> dt_(T e, boolean inline) {
+ return new DT<T>("dt", e, opt(false, inline, false));
+ }
+
+ private <T extends _> DD<T> dd_(T e, boolean inline) {
+ return new DD<T>("dd", e, opt(false, inline, false));
+ }
+
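+ /**
+  * Builder for the HTML <del> (deleted text) element, per HamletSpec.DEL.
+  * $cite and $datetime carry the edit-provenance attributes; del may wrap
+  * both block and inline content, hence the full child-element surface.
+  */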
+ public class DEL<T extends _> extends EImp<T> implements HamletSpec.DEL {
+ public DEL(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public DEL<T> $cite(String value) {
+ addAttr("cite", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $datetime(String value) {
+ addAttr("datetime", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public DEL<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<DEL<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<DEL<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public DEL<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<DEL<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<DEL<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<DEL<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public DEL<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<DEL<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public DEL<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<DEL<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<DEL<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<DEL<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<DEL<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<DEL<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<DEL<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public DEL<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<DEL<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public DEL<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<DEL<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public DEL<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<DEL<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public DEL<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<DEL<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public DEL<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<DEL<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public DEL<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<DEL<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public DEL<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<DEL<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<DEL<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<DEL<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<DEL<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<DEL<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<DEL<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<DEL<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<DEL<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<DEL<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<DEL<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public DEL<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public DEL<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<DEL<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public DEL<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<DEL<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public DEL<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<DEL<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public DEL<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<DEL<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public DEL<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<DEL<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public DEL<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<DEL<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public DEL<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<DEL<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public DEL<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<DEL<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public DEL<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<DEL<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public DEL<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<DEL<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public DEL<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<DEL<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public DEL<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<DEL<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public DEL<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<DEL<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<DEL<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public DEL<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public DEL<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<DEL<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public DEL<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<DEL<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<DEL<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<DEL<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public DEL<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<DEL<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public DEL<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<DEL<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<DEL<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public DEL<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<DEL<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<DEL<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public DEL<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<DEL<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public DEL<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<DEL<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public DEL<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public DEL<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<DEL<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public DEL<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<DEL<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public DEL<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<DEL<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public DEL<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<DEL<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public DEL<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<DEL<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<DEL<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<DEL<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<DEL<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DEL<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DEL<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public DEL<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<DEL<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<DEL<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public DEL<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
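+ /**
+  * Builder for the HTML <ins> (inserted text) element, per HamletSpec.INS.
+  * Mirrors DEL: the same $cite/$datetime attributes and the same mixed
+  * block/inline child-element surface.
+  */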
+ public class INS<T extends _> extends EImp<T> implements HamletSpec.INS {
+ public INS(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public INS<T> $cite(String value) {
+ addAttr("cite", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $datetime(String value) {
+ addAttr("datetime", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public INS<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<INS<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<INS<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public INS<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<INS<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<INS<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<INS<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public INS<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<INS<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public INS<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<INS<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<INS<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<INS<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<INS<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<INS<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<INS<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public INS<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<INS<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public INS<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public INS<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<INS<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public INS<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<INS<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public INS<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<INS<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public INS<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<INS<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public INS<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<INS<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public INS<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<INS<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<INS<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<INS<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<INS<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<INS<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<INS<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<INS<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<INS<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<INS<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<INS<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public INS<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public INS<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<INS<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public INS<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<INS<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public INS<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<INS<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public INS<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public INS<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<INS<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public INS<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<INS<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public INS<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<INS<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public INS<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<INS<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public INS<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public INS<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<INS<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public INS<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<INS<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public INS<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<INS<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public INS<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<INS<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public INS<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<INS<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public INS<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<INS<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<INS<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public INS<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public INS<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<INS<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public INS<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<INS<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<INS<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<INS<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public INS<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<INS<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public INS<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<INS<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<INS<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public INS<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<INS<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<INS<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public INS<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<INS<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public INS<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<INS<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public INS<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public INS<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<INS<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public INS<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<INS<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public INS<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<INS<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public INS<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<INS<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public INS<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<INS<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<INS<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<INS<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<INS<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<INS<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<INS<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public INS<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<INS<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<INS<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public INS<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
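+ /**
+  * Generated builder for the HTML {@code <blockquote>} (block quotation)
+  * element, implementing HamletSpec.BLOCKQUOTE. {@code $cite} takes the URI
+  * of the quoted source; {@code bq()} is a shorthand alias for opening a
+  * nested blockquote.
+  */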
+ public class BLOCKQUOTE<T extends _> extends EImp<T> implements HamletSpec.BLOCKQUOTE {
+ public BLOCKQUOTE(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $cite(String value) {
+ addAttr("cite", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public BLOCKQUOTE<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<BLOCKQUOTE<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<BLOCKQUOTE<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<BLOCKQUOTE<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<BLOCKQUOTE<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<BLOCKQUOTE<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<BLOCKQUOTE<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<BLOCKQUOTE<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<BLOCKQUOTE<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<BLOCKQUOTE<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<BLOCKQUOTE<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<BLOCKQUOTE<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<BLOCKQUOTE<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<BLOCKQUOTE<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<BLOCKQUOTE<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<BLOCKQUOTE<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<BLOCKQUOTE<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<BLOCKQUOTE<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<BLOCKQUOTE<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public BLOCKQUOTE<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<BLOCKQUOTE<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<BLOCKQUOTE<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<BLOCKQUOTE<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<BLOCKQUOTE<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<BLOCKQUOTE<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<BLOCKQUOTE<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<BLOCKQUOTE<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<BLOCKQUOTE<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<BLOCKQUOTE<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<BLOCKQUOTE<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public SCRIPT<BLOCKQUOTE<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public BLOCKQUOTE<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+ }
+
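+ /**
+  * Generated builder for the HTML {@code <q>} (inline quotation) element,
+  * implementing HamletSpec.Q. Unlike BLOCKQUOTE it accepts only inline
+  * content, and {@code q()} returns {@code Q<Q<T>>}, so quotations nest with
+  * each {@code _()} returning to the enclosing quote.
+  */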
+ public class Q<T extends _> extends EImp<T> implements HamletSpec.Q {
+ public Q(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public Q<T> $cite(String value) {
+ addAttr("cite", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public Q<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public Q<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<Q<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public Q<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<Q<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public Q<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<Q<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public Q<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<Q<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public Q<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<Q<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public Q<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<Q<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public Q<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<Q<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public Q<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<Q<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public Q<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<Q<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public Q<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<Q<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public Q<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<Q<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public Q<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<Q<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public Q<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<Q<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<Q<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public Q<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public Q<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<Q<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public Q<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<Q<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<Q<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<Q<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public Q<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<Q<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public Q<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<Q<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<Q<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public Q<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<Q<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<Q<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public Q<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<Q<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public Q<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<Q<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public Q<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public Q<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<Q<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public Q<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<Q<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public Q<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<Q<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public Q<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<Q<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public Q<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<Q<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<Q<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<Q<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<Q<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<Q<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<Q<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public Q<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<Q<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<Q<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public Q<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
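+ /**
+  * Generated builder for the HTML {@code <pre>} (preformatted text) element,
+  * implementing HamletSpec.PRE. Matching the HTML 4 content restrictions for
+  * preformatted text, it omits the img, object, small, sub and sup factories
+  * found on the other inline containers. As elsewhere, {@code _()} appends
+  * PCDATA escaped while {@code _r()} appends it raw (the boolean passed to
+  * {@code _p}).
+  */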
+ public class PRE<T extends _> extends EImp<T> implements HamletSpec.PRE {
+ public PRE(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public PRE<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public PRE<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public PRE<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<PRE<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public PRE<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<PRE<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public PRE<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<PRE<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public PRE<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<PRE<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public PRE<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<PRE<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public PRE<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<PRE<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public PRE<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<PRE<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public PRE<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<PRE<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public PRE<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<PRE<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public PRE<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<PRE<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public PRE<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<PRE<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public PRE<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<PRE<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<PRE<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public PRE<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public PRE<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public MAP<PRE<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<PRE<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public PRE<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<PRE<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<PRE<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public PRE<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<PRE<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public PRE<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<PRE<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public PRE<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public PRE<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<PRE<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public PRE<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<PRE<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public PRE<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<PRE<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public PRE<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<PRE<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public PRE<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<PRE<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<PRE<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<PRE<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<PRE<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<PRE<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<PRE<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public PRE<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<PRE<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<PRE<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public PRE<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
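+ /**
+  * Generated builder for the HTML {@code <h6>} heading element, implementing
+  * HamletSpec.H6; the other heading levels below follow the identical
+  * inline-content pattern.
+  */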
+ public class H6<T extends _> extends EImp<T> implements HamletSpec.H6 {
+ public H6(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public H6<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public H6<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public H6<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<H6<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public H6<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<H6<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public H6<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<H6<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public H6<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<H6<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public H6<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<H6<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public H6<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<H6<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public H6<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<H6<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public H6<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<H6<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public H6<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<H6<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public H6<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<H6<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public H6<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<H6<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public H6<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<H6<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public H6<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<H6<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<H6<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public H6<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public H6<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<H6<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public H6<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<H6<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<H6<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<H6<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public H6<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<H6<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public H6<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<H6<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<H6<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public H6<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<H6<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<H6<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public H6<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<H6<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public H6<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<H6<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public H6<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public H6<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<H6<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public H6<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<H6<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public H6<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<H6<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public H6<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<H6<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public H6<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<H6<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<H6<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<H6<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<H6<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H6<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H6<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public H6<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<H6<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<H6<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public H6<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
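+ /** Generated builder for the HTML {@code <h5>} heading element; same pattern as H6 above. */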
+ public class H5<T extends _> extends EImp<T> implements HamletSpec.H5 {
+ public H5(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public H5<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public H5<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public H5<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<H5<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public H5<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<H5<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public H5<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<H5<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public H5<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<H5<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public H5<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<H5<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public H5<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<H5<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public H5<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<H5<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public H5<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<H5<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public H5<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<H5<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public H5<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<H5<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public H5<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<H5<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public H5<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<H5<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public H5<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<H5<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<H5<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public H5<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public H5<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<H5<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public H5<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<H5<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<H5<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<H5<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public H5<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<H5<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public H5<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<H5<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<H5<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public H5<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<H5<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<H5<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public H5<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<H5<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public H5<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<H5<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public H5<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public H5<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<H5<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public H5<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<H5<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public H5<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<H5<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public H5<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<H5<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public H5<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<H5<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<H5<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<H5<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<H5<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H5<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H5<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public H5<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<H5<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<H5<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public H5<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
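+  // Generated builder for the <h4> element (same shape as the other heading
+  // classes in this file). The $-prefixed methods buffer one attribute via
+  // addAttr and return this for chaining; child-element factories (b(), em(),
+  // a(), ...) first flush any pending attributes with closeAttrs(), then hand
+  // back a typed child builder whose no-arg _() closes the child and returns
+  // control to this element.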
+ public class H4<T extends _> extends EImp<T> implements HamletSpec.H4 {
+ public H4(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public H4<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public H4<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public H4<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<H4<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public H4<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<H4<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public H4<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<H4<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public H4<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<H4<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public H4<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<H4<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public H4<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<H4<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public H4<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<H4<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public H4<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<H4<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public H4<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<H4<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public H4<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<H4<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public H4<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<H4<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public H4<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<H4<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public H4<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<H4<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<H4<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public H4<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public H4<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<H4<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public H4<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<H4<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<H4<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<H4<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public H4<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<H4<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public H4<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<H4<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<H4<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public H4<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<H4<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<H4<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public H4<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<H4<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public H4<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<H4<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public H4<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public H4<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<H4<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public H4<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<H4<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public H4<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<H4<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public H4<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<H4<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public H4<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<H4<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<H4<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<H4<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<H4<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H4<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H4<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public H4<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<H4<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<H4<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public H4<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
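+  // As above, for <h3>. Note the two content appenders every element class
+  // here defines: _(Object...) delegates to _p(true, lines) while
+  // _r(Object...) delegates to _p(false, lines); the flag appears to control
+  // HTML escaping, so _r would emit the lines raw. (That reading of the flag
+  // is an assumption; the calls themselves are verbatim from this patch.)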
+ public class H3<T extends _> extends EImp<T> implements HamletSpec.H3 {
+ public H3(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public H3<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public H3<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public H3<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<H3<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public H3<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<H3<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public H3<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<H3<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public H3<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<H3<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public H3<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<H3<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public H3<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<H3<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public H3<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<H3<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public H3<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<H3<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public H3<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<H3<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public H3<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<H3<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public H3<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<H3<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public H3<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<H3<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public H3<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<H3<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<H3<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public H3<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public H3<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<H3<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public H3<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<H3<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<H3<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<H3<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public H3<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<H3<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public H3<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<H3<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<H3<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public H3<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<H3<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<H3<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public H3<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<H3<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public H3<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<H3<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public H3<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public H3<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<H3<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public H3<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<H3<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public H3<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<H3<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public H3<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<H3<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public H3<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<H3<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<H3<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<H3<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<H3<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H3<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H3<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public H3<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<H3<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<H3<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public H3<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
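+  // As above, for <h2>. The (String selector, ...) overloads route through
+  // setSelector(e, selector), which by Hamlet convention expands a CSS-like
+  // "#id.class" shorthand into $id/$class attributes. Hypothetical sketch
+  // (names illustrative only, not part of this patch):
+  //   h2("#title.bold", "Cluster Metrics"); // <h2 id="title" class="bold">...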
+ public class H2<T extends _> extends EImp<T> implements HamletSpec.H2 {
+ public H2(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public H2<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public H2<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public H2<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<H2<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public H2<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<H2<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public H2<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<H2<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public H2<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H2<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<H2<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public H2<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<H2<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public H2<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<H2<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public H2<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<H2<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public H2<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H2<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<H2<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public H2<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<H2<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public H2<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<H2<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public H2<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<H2<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public H2<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<H2<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public H2<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<H2<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<H2<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public H2<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public H2<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<H2<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public H2<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<H2<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<H2<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<H2<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public H2<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<H2<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public H2<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<H2<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<H2<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public H2<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<H2<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<H2<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public H2<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<H2<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public H2<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<H2<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public H2<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public H2<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<H2<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public H2<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<H2<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public H2<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<H2<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public H2<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<H2<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public H2<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<H2<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<H2<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<H2<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<H2<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H2<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H2<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public H2<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<H2<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<H2<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public H2<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
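+  // As above, for <h1>. The boolean handed to the shared factories
+  // (b_(this, true), a_(this, true), ...) marks the child as inline; the
+  // block-level children created under OBJECT below pass false instead. The
+  // exact rendering effect of the flag is an assumption, not spelled out in
+  // this hunk.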
+ public class H1<T extends _> extends EImp<T> implements HamletSpec.H1 {
+ public H1(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public H1<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public H1<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public H1<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<H1<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public H1<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<H1<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public H1<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<H1<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public H1<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H1<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<H1<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public H1<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<H1<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public H1<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<H1<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public H1<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<H1<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public H1<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H1<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<H1<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public H1<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<H1<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public H1<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<H1<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public H1<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<H1<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public H1<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<H1<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public H1<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<H1<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<H1<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public H1<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public H1<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<H1<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public H1<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<H1<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<H1<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<H1<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public H1<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<H1<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public H1<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<H1<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<H1<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public H1<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<H1<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<H1<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public H1<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<H1<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public H1<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<H1<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public H1<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public H1<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<H1<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public H1<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<H1<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public H1<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<H1<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public H1<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<H1<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public H1<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<H1<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<H1<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<H1<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<H1<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H1<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<H1<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public H1<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<H1<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<H1<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public H1<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
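+  // Generated builder for <p>. Beyond the heading-style methods, note the
+  // one-shot conveniences: a(href, anchorText) opens <a>, sets $href, appends
+  // the text and closes the anchor in a single call, and label(forId, cdata)
+  // does the same via $for. Hypothetical sketch (names illustrative only):
+  //   p().a("/conf", "configuration")._(" uses the current settings.")._();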
+ public class P<T extends _> extends EImp<T> implements HamletSpec.P {
+ public P(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public P<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public P<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public P<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public P<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<P<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public P<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public P<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<P<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public P<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public P<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<P<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public P<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public P<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public P<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<P<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public P<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<P<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public P<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public P<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<P<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public P<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public P<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<P<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public P<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public P<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public P<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<P<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public P<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<P<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public P<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public P<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<P<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public P<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public P<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<P<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public P<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public P<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<P<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public P<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public P<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<P<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<P<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public P<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public P<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<P<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public P<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<P<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<P<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<P<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public P<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public P<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<P<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public P<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public P<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<P<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<P<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public P<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public P<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<P<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<P<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public P<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<P<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public P<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<P<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public P<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public P<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<P<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public P<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<P<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public P<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<P<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public P<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<P<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public P<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<P<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<P<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<P<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<P<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<P<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<P<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public P<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<P<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<P<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public P<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
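+  // Generated builder for <hr>: attribute setters only, since the element
+  // takes no children; the hr(String selector) convenience on parent builders
+  // closes it immediately (setSelector(hr(), selector)._(), as under OBJECT
+  // below).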
+ public class HR<T extends _> extends EImp<T> implements HamletSpec.HR {
+ public HR(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public HR<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public HR<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+ }
+
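+  // Generated builder for <param>: only $id, $name and $value, matching its
+  // single use through OBJECT.param(name, value) below.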
+ public class PARAM<T extends _> extends EImp<T> implements HamletSpec.PARAM {
+ public PARAM(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public PARAM<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public PARAM<T> $name(String value) {
+ addAttr("name", value);
+ return this;
+ }
+
+ @Override
+ public PARAM<T> $value(String value) {
+ addAttr("value", value);
+ return this;
+ }
+ }
+
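+  // Generated builder for <object>. Unlike the inline-flavored children of
+  // the headings above, its child factories (param_, table_, p_, h1_, ...)
+  // pass false, presumably selecting block rendering; param(name, value) is
+  // the one-shot form for nested <param> tags. Hypothetical sketch (names
+  // and paths illustrative only):
+  //   object().$data("/static/chart.svg").param("autoplay", "false")._();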
+ public class OBJECT<T extends _> extends EImp<T> implements HamletSpec.OBJECT {
+ public OBJECT(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public OBJECT<T> $type(String value) {
+ addAttr("type", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $name(String value) {
+ addAttr("name", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $tabindex(int value) {
+ addAttr("tabindex", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $data(String value) {
+ addAttr("data", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $height(String value) {
+ addAttr("height", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $height(int value) {
+ addAttr("height", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $width(int value) {
+ addAttr("width", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $width(String value) {
+ addAttr("width", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $usemap(String value) {
+ addAttr("usemap", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public PARAM<OBJECT<T>> param() {
+ closeAttrs();
+ return param_(this, false);
+ }
+
+ @Override
+ public OBJECT<T> param(String name, String value) {
+ return param().$name(name).$value(value)._();
+ }
+
+ @Override
+ public TABLE<OBJECT<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<OBJECT<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public OBJECT<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<OBJECT<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<OBJECT<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<OBJECT<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
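+    // Embeds a SubView: _v(cls) defers rendering of the named view to
+    // the webapp framework instead of emitting markup here.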
+ @Override
+ public OBJECT<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<OBJECT<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public OBJECT<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<OBJECT<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<OBJECT<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<OBJECT<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<OBJECT<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<OBJECT<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<OBJECT<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public OBJECT<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<OBJECT<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public OBJECT<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<OBJECT<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public OBJECT<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<OBJECT<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public OBJECT<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<OBJECT<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public OBJECT<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<OBJECT<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public OBJECT<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<OBJECT<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public OBJECT<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<OBJECT<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<OBJECT<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<OBJECT<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<OBJECT<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<OBJECT<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<OBJECT<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<OBJECT<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<OBJECT<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<OBJECT<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<OBJECT<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public OBJECT<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public OBJECT<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<OBJECT<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<OBJECT<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<OBJECT<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<OBJECT<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<OBJECT<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<OBJECT<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<OBJECT<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<OBJECT<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<OBJECT<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<OBJECT<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<OBJECT<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<OBJECT<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<OBJECT<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<OBJECT<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public OBJECT<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public OBJECT<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<OBJECT<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<OBJECT<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<OBJECT<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<OBJECT<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<OBJECT<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<OBJECT<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<OBJECT<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public OBJECT<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<OBJECT<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<OBJECT<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<OBJECT<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<OBJECT<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public OBJECT<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<OBJECT<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<OBJECT<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<OBJECT<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<OBJECT<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<OBJECT<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<OBJECT<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<OBJECT<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<OBJECT<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<OBJECT<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<OBJECT<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public OBJECT<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<OBJECT<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<OBJECT<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public OBJECT<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
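+  // Factory for PARAM children. The opt(endTag, inline, pre) flags tell
+  // the renderer that <param> is an empty element with no end tag, laid
+  // out per the caller's inline flag.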
+  private <T extends _> PARAM<T> param_(T e, boolean inline) {
+    return new PARAM<T>("param", e, opt(false, inline, false));
+  }
+
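+  /**
+   * Builder for the empty <img> element: attribute setters only, no
+   * child methods. $ismap passes a null value so the renderer emits a
+   * bare boolean attribute.
+   */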
+ public class IMG<T extends _> extends EImp<T> implements HamletSpec.IMG {
+ public IMG(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public IMG<T> $src(String value) {
+ addAttr("src", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $alt(String value) {
+ addAttr("alt", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $ismap() {
+ addAttr("ismap", null);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $height(String value) {
+ addAttr("height", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $height(int value) {
+ addAttr("height", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $width(int value) {
+ addAttr("width", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $width(String value) {
+ addAttr("width", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $usemap(String value) {
+ addAttr("usemap", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public IMG<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+ }
+
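+  /**
+   * Builder for <link>. $rel and $media take either free-form strings
+   * or typed EnumSets (LinkType, Media) that addRelAttr/addMediaAttr
+   * serialize into the attribute's token list.
+   */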
+ public class LINK<T extends _> extends EImp<T> implements HamletSpec.LINK {
+ public LINK(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public LINK<T> $rel(EnumSet<LinkType> value) {
+ addRelAttr("rel", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $rel(String value) {
+ addAttr("rel", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $href(String value) {
+ addAttr("href", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $type(String value) {
+ addAttr("type", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $media(EnumSet<Media> value) {
+ addMediaAttr("media", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $media(String value) {
+ addAttr("media", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $hreflang(String value) {
+ addAttr("hreflang", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public LINK<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+ }
+
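+  /**
+   * Builder for the empty <area> element used inside <map>: image-map
+   * geometry ($shape, $coords, $href) plus the usual core, i18n and
+   * event attributes.
+   */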
+ public class AREA<T extends _> extends EImp<T> implements HamletSpec.AREA {
+ public AREA(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public AREA<T> $href(String value) {
+ addAttr("href", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $tabindex(int value) {
+ addAttr("tabindex", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $accesskey(String value) {
+ addAttr("accesskey", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onfocus(String value) {
+ addAttr("onfocus", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onblur(String value) {
+ addAttr("onblur", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $alt(String value) {
+ addAttr("alt", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $shape(Shape value) {
+ addAttr("shape", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $coords(String value) {
+ addAttr("coords", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public AREA<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+ }
+
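+  // Factory for AREA children; like PARAM above, an empty element with
+  // no end tag.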
+  private <T extends _> AREA<T> area_(T e, boolean inline) {
+    return new AREA<T>("area", e, opt(false, inline, false));
+  }
+
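+  /**
+   * Builder for <map>: a named ($name) container of AREA hot-spots that
+   * may also hold block content (tables, paragraphs, headings, lists,
+   * forms), mirroring the HTML 4 content model in HamletSpec.
+   */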
+ public class MAP<T extends _> extends EImp<T> implements HamletSpec.MAP {
+ public MAP(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public MAP<T> $name(String value) {
+ addAttr("name", value);
+ return this;
+ }
+
+ @Override
+ public AREA<MAP<T>> area() {
+ closeAttrs();
+ return area_(this, false);
+ }
+
+ @Override
+ public AREA<MAP<T>> area(String selector) {
+ return setSelector(area(), selector);
+ }
+
+ @Override
+ public MAP<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public MAP<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<MAP<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<MAP<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public MAP<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<MAP<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<MAP<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<MAP<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public MAP<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<MAP<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public MAP<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<MAP<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<MAP<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<MAP<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<MAP<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<MAP<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<MAP<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public MAP<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<MAP<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public MAP<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<MAP<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public MAP<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<MAP<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public MAP<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public MAP<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<MAP<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public MAP<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public MAP<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<MAP<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public MAP<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public MAP<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<MAP<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public MAP<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public MAP<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<MAP<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<MAP<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<MAP<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<MAP<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<MAP<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<MAP<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<MAP<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<MAP<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<MAP<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<MAP<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+ }
+
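+  /**
+   * Builder for <a>: link attributes ($href, $rel, $type, $hreflang, ...)
+   * plus inline children only. Container builders expose convenience
+   * overloads such as a(href, anchorText) that delegate here and close
+   * the element in one call.
+   */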
+ public class A<T extends _> extends EImp<T> implements HamletSpec.A {
+ public A(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public A<T> $rel(EnumSet<LinkType> value) {
+ addRelAttr("rel", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $rel(String value) {
+ addAttr("rel", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $href(String value) {
+ addAttr("href", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $type(String value) {
+ addAttr("type", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $tabindex(int value) {
+ addAttr("tabindex", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $accesskey(String value) {
+ addAttr("accesskey", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onfocus(String value) {
+ addAttr("onfocus", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onblur(String value) {
+ addAttr("onblur", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $hreflang(String value) {
+ addAttr("hreflang", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public A<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public A<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public A<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<A<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public A<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public A<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<A<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public A<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public A<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<A<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public A<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public A<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<A<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public A<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<A<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public A<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public A<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<A<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public A<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public A<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<A<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public A<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public A<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<A<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public A<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<A<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public A<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public A<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<A<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public A<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public A<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<A<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public A<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public A<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<A<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public A<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public A<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public IMG<A<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public A<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<A<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<A<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public MAP<A<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<A<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public A<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public A<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<A<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<A<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public A<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<A<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public A<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<A<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public A<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public A<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<A<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public A<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<A<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public A<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<A<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public A<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public SUB<A<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public A<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public A<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<A<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public A<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public A<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public LABEL<A<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public A<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<A<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<A<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<A<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<A<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<A<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<A<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public A<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<A<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<A<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public A<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
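+  /**
+   * Builder for <div>, the generic block container accepting both block
+   * and inline children. A typical chain (names hypothetical, assuming
+   * an enclosing Hamlet instance html):
+   *   html.div("#content").h2("Jobs").a("/jobs", "all jobs")._();
+   * where the "#content" selector is expanded by setSelector into id
+   * and class attributes.
+   */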
+ public class DIV<T extends _> extends EImp<T> implements HamletSpec.DIV {
+ public DIV(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public DIV<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public DIV<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<DIV<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<DIV<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public DIV<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<DIV<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<DIV<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<DIV<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
+ @Override
+ public DIV<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<DIV<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public DIV<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<DIV<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<DIV<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<DIV<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<DIV<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<DIV<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<DIV<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public DIV<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<DIV<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public DIV<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<DIV<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public DIV<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<DIV<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public DIV<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<DIV<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public DIV<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<DIV<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public DIV<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<DIV<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public DIV<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<DIV<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<DIV<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<DIV<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<DIV<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<DIV<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<DIV<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<DIV<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<DIV<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<DIV<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<DIV<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public DIV<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public DIV<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<DIV<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public DIV<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<DIV<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public DIV<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<DIV<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public DIV<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<DIV<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public DIV<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<DIV<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public DIV<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<DIV<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public DIV<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<DIV<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public DIV<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<DIV<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public DIV<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<DIV<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public DIV<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<DIV<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public DIV<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<DIV<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public DIV<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<DIV<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public DIV<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<DIV<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<DIV<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public DIV<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public DIV<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<DIV<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public DIV<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<DIV<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<DIV<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<DIV<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public DIV<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<DIV<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public DIV<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<DIV<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<DIV<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public DIV<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<DIV<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<DIV<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public DIV<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<DIV<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public DIV<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<DIV<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public DIV<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public DIV<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<DIV<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public DIV<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<DIV<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public DIV<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<DIV<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public DIV<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<DIV<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public DIV<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<DIV<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<DIV<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<DIV<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<DIV<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DIV<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DIV<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public DIV<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<DIV<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<DIV<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public DIV<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
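+  /**
+   * Builder for <address>: inline content only (text runs, phrase
+   * markup, links, images), per the content model in HamletSpec.ADDRESS.
+   */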
+ public class ADDRESS<T extends _> extends EImp<T> implements HamletSpec.ADDRESS {
+ public ADDRESS(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public ADDRESS<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public ADDRESS<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<ADDRESS<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<ADDRESS<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<ADDRESS<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<ADDRESS<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<ADDRESS<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<ADDRESS<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<ADDRESS<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<ADDRESS<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<ADDRESS<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<ADDRESS<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<ADDRESS<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<ADDRESS<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<ADDRESS<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<ADDRESS<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public ADDRESS<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public ADDRESS<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<ADDRESS<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<ADDRESS<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<ADDRESS<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<ADDRESS<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<ADDRESS<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<ADDRESS<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<ADDRESS<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public ADDRESS<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<ADDRESS<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<ADDRESS<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<ADDRESS<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<ADDRESS<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<ADDRESS<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<ADDRESS<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<ADDRESS<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<ADDRESS<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<ADDRESS<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<ADDRESS<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<ADDRESS<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<ADDRESS<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<ADDRESS<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<ADDRESS<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public ADDRESS<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<ADDRESS<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<ADDRESS<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public ADDRESS<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
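+ /**
+  * Generated builder for the HTML body element. Attribute setters
+  * ($id, $class, the on* event handlers, ...) record an attribute and
+  * return this element; child-element methods open a nested builder
+  * typed so that closing it returns here. Illustrative sketch only
+  * (the variable "html" is hypothetical, not defined in this file):
+  *   html.body().h1("Title").p()._("hello")._()._();
+  */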
+ public class BODY<T extends _> extends EImp<T> implements HamletSpec.BODY {
+ public BODY(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public BODY<T> $onload(String value) {
+ addAttr("onload", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onunload(String value) {
+ addAttr("onunload", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public BODY<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public TABLE<BODY<T>> table() {
+ closeAttrs();
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<BODY<T>> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public BODY<T> address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<BODY<T>> address() {
+ closeAttrs();
+ return address_(this, false);
+ }
+
+ @Override
+ public P<BODY<T>> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<BODY<T>> p() {
+ closeAttrs();
+ return p_(this, false);
+ }
+
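+ // Embeds a sub-view: _v(cls) presumably records the SubView class so
+ // its output is rendered at this point in the page (view composition).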
+ @Override
+ public BODY<T> _(Class<? extends SubView> cls) {
+ _v(cls);
+ return this;
+ }
+
+ @Override
+ public HR<BODY<T>> hr() {
+ closeAttrs();
+ return hr_(this, false);
+ }
+
+ @Override
+ public BODY<T> hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<BODY<T>> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<BODY<T>> dl() {
+ closeAttrs();
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<BODY<T>> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<BODY<T>> div() {
+ closeAttrs();
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<BODY<T>> blockquote() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<BODY<T>> bq() {
+ closeAttrs();
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BODY<T> h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<BODY<T>> h1() {
+ closeAttrs();
+ return h1_(this, false);
+ }
+
+ @Override
+ public BODY<T> h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BODY<T> h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<BODY<T>> h2() {
+ closeAttrs();
+ return h2_(this, false);
+ }
+
+ @Override
+ public BODY<T> h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<BODY<T>> h3() {
+ closeAttrs();
+ return h3_(this, false);
+ }
+
+ @Override
+ public BODY<T> h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public BODY<T> h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<BODY<T>> h4() {
+ closeAttrs();
+ return h4_(this, false);
+ }
+
+ @Override
+ public BODY<T> h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public BODY<T> h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<BODY<T>> h5() {
+ closeAttrs();
+ return h5_(this, false);
+ }
+
+ @Override
+ public BODY<T> h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public BODY<T> h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<BODY<T>> h6() {
+ closeAttrs();
+ return h6_(this, false);
+ }
+
+ @Override
+ public BODY<T> h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public BODY<T> h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<BODY<T>> ul() {
+ closeAttrs();
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<BODY<T>> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<BODY<T>> ol() {
+ closeAttrs();
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<BODY<T>> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<BODY<T>> pre() {
+ closeAttrs();
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<BODY<T>> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<BODY<T>> form() {
+ closeAttrs();
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<BODY<T>> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<BODY<T>> fieldset() {
+ closeAttrs();
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<BODY<T>> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public SCRIPT<BODY<T>> script() {
+ closeAttrs();
+ return script_(this, false);
+ }
+
+ @Override
+ public BODY<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<BODY<T>> ins() {
+ closeAttrs();
+ return ins_(this, false);
+ }
+
+ @Override
+ public BODY<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<BODY<T>> del() {
+ closeAttrs();
+ return del_(this, false);
+ }
+
+ @Override
+ public BODY<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+ }
+
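+ // Element factories. Each creates a child builder for the named tag;
+ // the EnumSet<EOpt> built by opt(...) appears to encode rendering
+ // flags (roughly: end tag required, rendered inline, preformatted).
+ // Block-level callers pass inline=false; inline callers pass true.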
+ private <T extends _> TABLE<T> table_(T e, boolean inline) {
+ return new TABLE<T>("table", e, opt(true, inline, false)); }
+
+ private <T extends _> ADDRESS<T> address_(T e, boolean inline) {
+ return new ADDRESS<T>("address", e, opt(true, inline, false)); }
+
+ private <T extends _> P<T> p_(T e, boolean inline) {
+ return new P<T>("p", e, opt(false, inline, false)); }
+
+ private <T extends _> HR<T> hr_(T e, boolean inline) {
+ return new HR<T>("hr", e, opt(false, inline, false)); }
+
+ private <T extends _> DL<T> dl_(T e, boolean inline) {
+ return new DL<T>("dl", e, opt(true, inline, false)); }
+
+ private <T extends _> DIV<T> div_(T e, boolean inline) {
+ return new DIV<T>("div", e, opt(true, inline, false)); }
+
+ private <T extends _> BLOCKQUOTE<T> blockquote_(T e, boolean inline) {
+ return new BLOCKQUOTE<T>("blockquote", e, opt(true, inline, false)); }
+
+ private <T extends _> BLOCKQUOTE<T> bq_(T e, boolean inline) {
+ return new BLOCKQUOTE<T>("blockquote", e, opt(true, inline, false)); }
+
+ private <T extends _> FIELDSET<T> fieldset_(T e, boolean inline) {
+ return new FIELDSET<T>("fieldset", e, opt(true, inline, false)); }
+
+ private <T extends _> FORM<T> form_(T e, boolean inline) {
+ return new FORM<T>("form", e, opt(true, inline, false)); }
+
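+ /** Generated builder for the void br element; core attributes only. */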
+ public class BR<T extends _> extends EImp<T> implements HamletSpec.BR {
+ public BR(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public BR<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public BR<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public BR<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public BR<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+ }
+
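+ /** Generated builder for bdo (bidirectional override); $dir sets the text direction. */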
+ public class BDO<T extends _> extends EImp<T> implements HamletSpec.BDO {
+ public BDO(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public BDO<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public BDO<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public BDO<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public BDO<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public BDO<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public BDO<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
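+ // _(...) adds text content escaped; _r(...) adds it raw/unescaped.
+ // Both delegate to _p, whose boolean presumably toggles escaping.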
+ @Override
+ public BDO<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public BDO<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<BDO<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public BDO<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<BDO<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public BDO<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<BDO<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public BDO<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<BDO<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public BDO<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<BDO<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public BDO<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<BDO<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public BDO<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<BDO<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public BDO<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<BDO<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public BDO<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<BDO<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public BDO<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<BDO<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public BDO<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<BDO<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public BDO<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<BDO<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public BDO<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<BDO<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<BDO<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public BDO<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public BDO<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<BDO<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public BDO<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<BDO<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<BDO<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<BDO<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public BDO<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<BDO<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public BDO<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<BDO<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<BDO<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public BDO<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<BDO<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<BDO<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public BDO<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<BDO<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public BDO<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<BDO<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public BDO<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public BDO<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<BDO<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public BDO<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<BDO<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public BDO<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<BDO<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public BDO<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<BDO<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public BDO<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<BDO<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<BDO<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<BDO<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<BDO<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<BDO<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<BDO<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public BDO<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<BDO<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<BDO<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public BDO<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
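+ /** Generated builder for span, a generic inline container. */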
+ public class SPAN<T extends _> extends EImp<T> implements HamletSpec.SPAN {
+ public SPAN(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public SPAN<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public SPAN<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<SPAN<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public SPAN<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<SPAN<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public SPAN<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<SPAN<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public SPAN<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<SPAN<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public SPAN<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<SPAN<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public SPAN<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<SPAN<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public SPAN<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<SPAN<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public SPAN<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<SPAN<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public SPAN<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<SPAN<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public SPAN<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<SPAN<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public SPAN<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<SPAN<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public SPAN<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<SPAN<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public SPAN<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<SPAN<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<SPAN<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public SPAN<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public SPAN<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<SPAN<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public SPAN<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<SPAN<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<SPAN<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<SPAN<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public SPAN<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<SPAN<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public SPAN<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<SPAN<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<SPAN<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public SPAN<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<SPAN<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<SPAN<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public SPAN<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<SPAN<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public SPAN<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<SPAN<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public SPAN<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public SPAN<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<SPAN<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public SPAN<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<SPAN<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public SPAN<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<SPAN<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public SPAN<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<SPAN<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public SPAN<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<SPAN<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<SPAN<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<SPAN<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<SPAN<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SPAN<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SPAN<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public SPAN<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<SPAN<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<SPAN<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public SPAN<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
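+ /** Generated builder for sup (superscript text). */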
+ public class SUP<T extends _> extends EImp<T> implements HamletSpec.SUP {
+ public SUP(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public SUP<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public SUP<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public SUP<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<SUP<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public SUP<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<SUP<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public SUP<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<SUP<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public SUP<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<SUP<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public SUP<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<SUP<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public SUP<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<SUP<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public SUP<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<SUP<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public SUP<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<SUP<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public SUP<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<SUP<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public SUP<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<SUP<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public SUP<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<SUP<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public SUP<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<SUP<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public SUP<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<SUP<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<SUP<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public SUP<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public SUP<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<SUP<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public SUP<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<SUP<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<SUP<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<SUP<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public SUP<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<SUP<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public SUP<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<SUP<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<SUP<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public SUP<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<SUP<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<SUP<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public SUP<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<SUP<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public SUP<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<SUP<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public SUP<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public SUP<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<SUP<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public SUP<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<SUP<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public SUP<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<SUP<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public SUP<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<SUP<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public SUP<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<SUP<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<SUP<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<SUP<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<SUP<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SUP<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SUP<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public SUP<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<SUP<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<SUP<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public SUP<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
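+ /** Generated builder for sub (subscript text). */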
+ public class SUB<T extends _> extends EImp<T> implements HamletSpec.SUB {
+ public SUB(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public SUB<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public SUB<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public SUB<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<SUB<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public SUB<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<SUB<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public SUB<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<SUB<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public SUB<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<SUB<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public SUB<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<SUB<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public SUB<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<SUB<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public SUB<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<SUB<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public SUB<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<SUB<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public SUB<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<SUB<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public SUB<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<SUB<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public SUB<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<SUB<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public SUB<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<SUB<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public SUB<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<SUB<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<SUB<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public SUB<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public SUB<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<SUB<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public SUB<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<SUB<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<SUB<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<SUB<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public SUB<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<SUB<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public SUB<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<SUB<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<SUB<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public SUB<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<SUB<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<SUB<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public SUB<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<SUB<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public SUB<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<SUB<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public SUB<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public SUB<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<SUB<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public SUB<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<SUB<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public SUB<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<SUB<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public SUB<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<SUB<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public SUB<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<SUB<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<SUB<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<SUB<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<SUB<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SUB<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SUB<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public SUB<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<SUB<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<SUB<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public SUB<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
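+ /** Generated builder for acronym (HTML 4; abbr is the preferred element in later HTML). */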
+ public class ACRONYM<T extends _> extends EImp<T> implements HamletSpec.ACRONYM {
+ public ACRONYM(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public ACRONYM<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public ACRONYM<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<ACRONYM<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<ACRONYM<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<ACRONYM<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<ACRONYM<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<ACRONYM<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<ACRONYM<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<ACRONYM<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<ACRONYM<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<ACRONYM<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<ACRONYM<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<ACRONYM<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<ACRONYM<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<ACRONYM<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<ACRONYM<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public ACRONYM<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public ACRONYM<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<ACRONYM<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<ACRONYM<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<ACRONYM<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<ACRONYM<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<ACRONYM<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<ACRONYM<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<ACRONYM<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public ACRONYM<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<ACRONYM<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<ACRONYM<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<ACRONYM<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<ACRONYM<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public ACRONYM<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<ACRONYM<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<ACRONYM<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<ACRONYM<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<ACRONYM<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<ACRONYM<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<ACRONYM<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<ACRONYM<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<ACRONYM<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<ACRONYM<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<ACRONYM<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public ACRONYM<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<ACRONYM<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<ACRONYM<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public ACRONYM<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
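+ /**
+  * Generated builder for the HTML &lt;abbr&gt; element (HamletSpec.ABBR).
+  * Attribute setters ($id, $class, $title, event handlers) chain on this
+  * node; child-element factories return typed children that close back to
+  * this node via _(). Illustrative chain (hypothetical, assuming a P
+  * parent in scope): p._("The ").abbr("WWW")._(" uses HTTP.")._();
+  */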
+ public class ABBR<T extends _> extends EImp<T> implements HamletSpec.ABBR {
+ public ABBR(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public ABBR<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public ABBR<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<ABBR<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public ABBR<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<ABBR<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public ABBR<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<ABBR<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public ABBR<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<ABBR<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public ABBR<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<ABBR<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public ABBR<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<ABBR<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public ABBR<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<ABBR<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public ABBR<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<ABBR<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public ABBR<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<ABBR<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public ABBR<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<ABBR<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public ABBR<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<ABBR<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public ABBR<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<ABBR<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public ABBR<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<ABBR<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<ABBR<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public ABBR<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public ABBR<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<ABBR<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public ABBR<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<ABBR<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<ABBR<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<ABBR<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public ABBR<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<ABBR<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public ABBR<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<ABBR<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<ABBR<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public ABBR<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<ABBR<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<ABBR<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public ABBR<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<ABBR<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public ABBR<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<ABBR<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public ABBR<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public ABBR<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<ABBR<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public ABBR<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<ABBR<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public ABBR<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<ABBR<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public ABBR<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<ABBR<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public ABBR<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<ABBR<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<ABBR<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<ABBR<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<ABBR<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<ABBR<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<ABBR<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public ABBR<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<ABBR<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<ABBR<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public ABBR<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
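+ /**
+  * Generated builder for the HTML &lt;cite&gt; element (HamletSpec.CITE);
+  * same fluent pattern of chained attributes and typed inline children.
+  */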
+ public class CITE<T extends _> extends EImp<T> implements HamletSpec.CITE {
+ public CITE(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public CITE<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public CITE<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public CITE<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<CITE<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public CITE<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<CITE<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public CITE<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<CITE<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public CITE<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<CITE<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public CITE<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<CITE<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public CITE<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<CITE<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public CITE<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<CITE<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public CITE<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<CITE<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public CITE<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<CITE<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public CITE<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<CITE<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public CITE<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<CITE<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public CITE<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<CITE<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public CITE<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<CITE<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<CITE<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public CITE<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public CITE<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<CITE<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public CITE<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<CITE<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<CITE<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<CITE<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public CITE<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<CITE<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public CITE<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<CITE<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<CITE<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public CITE<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<CITE<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<CITE<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public CITE<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<CITE<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public CITE<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<CITE<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public CITE<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public CITE<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<CITE<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public CITE<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<CITE<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public CITE<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<CITE<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public CITE<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<CITE<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public CITE<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<CITE<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<CITE<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<CITE<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<CITE<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<CITE<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<CITE<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public CITE<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<CITE<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<CITE<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public CITE<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
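+ /**
+  * Generated builder for the HTML &lt;var&gt; element (HamletSpec.VAR).
+  */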
+ public class VAR<T extends _> extends EImp<T> implements HamletSpec.VAR {
+ public VAR(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public VAR<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public VAR<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public VAR<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<VAR<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public VAR<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<VAR<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public VAR<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<VAR<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public VAR<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<VAR<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public VAR<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<VAR<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public VAR<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<VAR<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public VAR<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<VAR<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public VAR<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<VAR<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public VAR<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<VAR<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public VAR<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<VAR<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public VAR<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<VAR<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public VAR<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<VAR<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public VAR<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<VAR<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<VAR<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public VAR<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public VAR<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<VAR<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public VAR<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<VAR<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<VAR<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<VAR<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public VAR<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<VAR<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public VAR<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<VAR<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<VAR<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public VAR<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<VAR<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<VAR<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public VAR<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<VAR<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public VAR<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<VAR<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public VAR<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public VAR<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<VAR<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public VAR<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<VAR<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public VAR<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<VAR<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public VAR<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<VAR<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public VAR<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<VAR<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<VAR<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<VAR<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<VAR<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<VAR<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<VAR<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public VAR<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<VAR<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<VAR<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public VAR<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
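+ /**
+  * Generated builder for the HTML &lt;kbd&gt; element (HamletSpec.KBD).
+  */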
+ public class KBD<T extends _> extends EImp<T> implements HamletSpec.KBD {
+ public KBD(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public KBD<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public KBD<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public KBD<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<KBD<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public KBD<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<KBD<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public KBD<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<KBD<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public KBD<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<KBD<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public KBD<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<KBD<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public KBD<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<KBD<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public KBD<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<KBD<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public KBD<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<KBD<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public KBD<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<KBD<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public KBD<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<KBD<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public KBD<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<KBD<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public KBD<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<KBD<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public KBD<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<KBD<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<KBD<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public KBD<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public KBD<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<KBD<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public KBD<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<KBD<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<KBD<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<KBD<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public KBD<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<KBD<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public KBD<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<KBD<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<KBD<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public KBD<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<KBD<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<KBD<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public KBD<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<KBD<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public KBD<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<KBD<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public KBD<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public KBD<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<KBD<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public KBD<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<KBD<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public KBD<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<KBD<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public KBD<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<KBD<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public KBD<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<KBD<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<KBD<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<KBD<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<KBD<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<KBD<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<KBD<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public KBD<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<KBD<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<KBD<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public KBD<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
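+ /**
+  * Generated builder for the HTML &lt;samp&gt; element (HamletSpec.SAMP).
+  */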
+ public class SAMP<T extends _> extends EImp<T> implements HamletSpec.SAMP {
+ public SAMP(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public SAMP<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public SAMP<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<SAMP<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public SAMP<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<SAMP<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public SAMP<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<SAMP<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public SAMP<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<SAMP<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public SAMP<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<SAMP<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public SAMP<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<SAMP<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public SAMP<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<SAMP<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public SAMP<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<SAMP<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public SAMP<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<SAMP<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public SAMP<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<SAMP<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public SAMP<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<SAMP<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public SAMP<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<SAMP<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public SAMP<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<SAMP<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<SAMP<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public SAMP<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public SAMP<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<SAMP<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public SAMP<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<SAMP<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<SAMP<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<SAMP<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public SAMP<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<SAMP<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public SAMP<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<SAMP<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<SAMP<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public SAMP<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<SAMP<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<SAMP<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public SAMP<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<SAMP<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public SAMP<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<SAMP<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public SAMP<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<SAMP<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public SAMP<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<SAMP<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public SAMP<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<SAMP<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public SAMP<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<SAMP<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public SAMP<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<SAMP<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<SAMP<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<SAMP<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<SAMP<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SAMP<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SAMP<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public SAMP<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<SAMP<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<SAMP<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public SAMP<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
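+ /**
+  * Generated builder for the HTML &lt;code&gt; element (HamletSpec.CODE).
+  * Illustrative use (hypothetical, assuming a P parent): p.code("a + b")
+  * emits &lt;code&gt;a + b&lt;/code&gt; and returns the parent for chaining.
+  */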
+ public class CODE<T extends _> extends EImp<T> implements HamletSpec.CODE {
+ public CODE(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public CODE<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public CODE<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public CODE<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<CODE<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public CODE<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<CODE<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public CODE<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<CODE<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public CODE<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<CODE<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public CODE<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<CODE<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public CODE<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<CODE<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public CODE<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<CODE<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public CODE<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<CODE<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public CODE<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<CODE<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public CODE<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<CODE<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public CODE<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<CODE<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public CODE<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<CODE<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public CODE<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<CODE<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<CODE<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public CODE<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public CODE<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<CODE<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public CODE<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<CODE<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<CODE<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<CODE<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public CODE<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<CODE<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public CODE<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<CODE<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<CODE<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public CODE<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<CODE<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<CODE<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public CODE<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<CODE<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public CODE<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<CODE<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public CODE<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public CODE<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<CODE<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public CODE<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<CODE<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public CODE<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<CODE<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public CODE<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<CODE<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public CODE<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<CODE<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<CODE<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<CODE<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<CODE<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<CODE<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<CODE<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public CODE<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<CODE<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<CODE<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public CODE<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
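+ /**
+  * Builder for the {@code <dfn>} (defining instance) element; it follows
+  * the same attribute, content, and child-element chaining conventions as
+  * the element classes above.
+  */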
+ public class DFN<T extends _> extends EImp<T> implements HamletSpec.DFN {
+ public DFN(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public DFN<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public DFN<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public DFN<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<DFN<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public DFN<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<DFN<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public DFN<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<DFN<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public DFN<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<DFN<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public DFN<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<DFN<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public DFN<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<DFN<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public DFN<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<DFN<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public DFN<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<DFN<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public DFN<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<DFN<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public DFN<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<DFN<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public DFN<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<DFN<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public DFN<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<DFN<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public DFN<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<DFN<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<DFN<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public DFN<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public DFN<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<DFN<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public DFN<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<DFN<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<DFN<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<DFN<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public DFN<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<DFN<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public DFN<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<DFN<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<DFN<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public DFN<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<DFN<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<DFN<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public DFN<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<DFN<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public DFN<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<DFN<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public DFN<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public DFN<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<DFN<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public DFN<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<DFN<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public DFN<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<DFN<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public DFN<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<DFN<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public DFN<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<DFN<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<DFN<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<DFN<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<DFN<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DFN<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<DFN<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public DFN<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<DFN<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<DFN<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public DFN<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
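+ /**
+  * Builder for the {@code <strong>} element. Shorthands such as
+  * {@code b(cdata)} open the child element, write the text (presumably
+  * escaped, since they go through {@code _}), close it, and return this
+  * builder.
+  */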
+ public class STRONG<T extends _> extends EImp<T> implements HamletSpec.STRONG {
+ public STRONG(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public STRONG<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public STRONG<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<STRONG<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public STRONG<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<STRONG<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public STRONG<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<STRONG<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public STRONG<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<STRONG<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public STRONG<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<STRONG<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public STRONG<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<STRONG<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public STRONG<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<STRONG<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public STRONG<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<STRONG<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public STRONG<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<STRONG<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public STRONG<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<STRONG<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public STRONG<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<STRONG<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public STRONG<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<STRONG<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public STRONG<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<STRONG<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<STRONG<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public STRONG<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public STRONG<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<STRONG<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public STRONG<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<STRONG<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<STRONG<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<STRONG<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public STRONG<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<STRONG<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public STRONG<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<STRONG<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<STRONG<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public STRONG<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<STRONG<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<STRONG<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public STRONG<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<STRONG<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public STRONG<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<STRONG<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public STRONG<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public STRONG<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<STRONG<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public STRONG<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<STRONG<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public STRONG<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<STRONG<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public STRONG<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<STRONG<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public STRONG<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<STRONG<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<STRONG<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<STRONG<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<STRONG<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<STRONG<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<STRONG<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public STRONG<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<STRONG<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<STRONG<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public STRONG<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
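+ /**
+  * Builder for the {@code <em>} element. The {@code (selector, cdata)}
+  * overloads route through {@code setSelector}, which appears to accept a
+  * CSS-style {@code "#id.class"} shorthand (an inference from its use
+  * alongside {@code $id}/{@code $class}; not spelled out in this file).
+  */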
+ public class EM<T extends _> extends EImp<T> implements HamletSpec.EM {
+ public EM(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public EM<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public EM<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public EM<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<EM<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public EM<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<EM<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public EM<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<EM<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public EM<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public EM<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<EM<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public EM<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<EM<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public EM<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<EM<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public EM<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<EM<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public EM<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public EM<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<EM<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public EM<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<EM<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public EM<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<EM<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public EM<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<EM<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public EM<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<EM<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public EM<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<EM<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<EM<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public EM<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public EM<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<EM<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public EM<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<EM<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<EM<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<EM<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public EM<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<EM<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public EM<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<EM<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<EM<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public EM<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<EM<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<EM<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public EM<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<EM<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public EM<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<EM<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public EM<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public EM<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<EM<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public EM<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<EM<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public EM<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<EM<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public EM<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<EM<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public EM<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<EM<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<EM<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<EM<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<EM<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<EM<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<EM<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public EM<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<EM<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<EM<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public EM<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
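+ /**
+  * Builder for the {@code <small>} element, with the same attribute,
+  * content, and child-element chaining as its sibling classes.
+  */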
+ public class SMALL<T extends _> extends EImp<T> implements HamletSpec.SMALL {
+ public SMALL(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public SMALL<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public SMALL<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<SMALL<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public SMALL<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<SMALL<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public SMALL<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<SMALL<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public SMALL<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<SMALL<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public SMALL<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<SMALL<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public SMALL<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<SMALL<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public SMALL<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<SMALL<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public SMALL<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<SMALL<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public SMALL<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<SMALL<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public SMALL<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<SMALL<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public SMALL<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<SMALL<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public SMALL<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<SMALL<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public SMALL<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<SMALL<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<SMALL<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public SMALL<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public SMALL<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<SMALL<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public SMALL<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<SMALL<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<SMALL<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<SMALL<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public SMALL<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<SMALL<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public SMALL<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<SMALL<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<SMALL<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public SMALL<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<SMALL<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<SMALL<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public SMALL<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<SMALL<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public SMALL<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<SMALL<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public SMALL<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public SMALL<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<SMALL<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public SMALL<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<SMALL<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public SMALL<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<SMALL<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public SMALL<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<SMALL<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public SMALL<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<SMALL<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<SMALL<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<SMALL<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<SMALL<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SMALL<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<SMALL<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public SMALL<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<SMALL<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<SMALL<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public SMALL<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
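+ /**
+  * Builder for the {@code <b>} element. Child builders are typed with this
+  * class as parent (e.g. {@code i()} returns {@code I<B<T>>}), so the
+  * no-argument {@code _()} on a child closes it and hands control back to
+  * this {@code B}.
+  */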
+ public class B<T extends _> extends EImp<T> implements HamletSpec.B {
+ public B(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public B<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public B<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public B<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public B<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<B<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public B<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public B<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<B<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public B<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public B<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<B<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public B<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public B<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public B<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<B<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public B<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<B<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public B<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public B<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<B<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public B<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public B<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<B<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public B<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public B<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public B<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<B<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public B<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<B<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public B<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public B<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<B<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public B<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public B<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<B<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public B<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public B<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<B<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public B<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public B<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<B<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<B<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public B<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public B<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<B<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public B<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<B<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<B<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<B<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public B<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public B<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<B<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public B<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public B<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<B<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<B<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public B<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public B<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<B<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<B<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public B<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<B<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public B<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<B<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public B<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public B<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<B<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public B<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<B<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public B<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<B<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public B<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<B<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public B<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<B<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<B<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<B<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<B<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<B<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<B<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public B<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<B<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<B<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public B<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
+ public class I<T extends _> extends EImp<T> implements HamletSpec.I {
+ public I(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ @Override
+ public I<T> $id(String value) {
+ addAttr("id", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $class(String value) {
+ addAttr("class", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $title(String value) {
+ addAttr("title", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $style(String value) {
+ addAttr("style", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $lang(String value) {
+ addAttr("lang", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $dir(Dir value) {
+ addAttr("dir", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $onclick(String value) {
+ addAttr("onclick", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $ondblclick(String value) {
+ addAttr("ondblclick", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $onmousedown(String value) {
+ addAttr("onmousedown", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $onmouseup(String value) {
+ addAttr("onmouseup", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $onmouseover(String value) {
+ addAttr("onmouseover", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $onmousemove(String value) {
+ addAttr("onmousemove", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $onmouseout(String value) {
+ addAttr("onmouseout", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $onkeypress(String value) {
+ addAttr("onkeypress", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $onkeydown(String value) {
+ addAttr("onkeydown", value);
+ return this;
+ }
+
+ @Override
+ public I<T> $onkeyup(String value) {
+ addAttr("onkeyup", value);
+ return this;
+ }
+
+ @Override
+ public I<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public I<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+
+ @Override
+ public B<I<T>> b() {
+ closeAttrs();
+ return b_(this, true);
+ }
+
+ @Override
+ public I<T> b(String cdata) {
+ return b()._(cdata)._();
+ }
+
+ @Override
+ public I<T> b(String selector, String cdata) {
+ return setSelector(b(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<I<T>> i() {
+ closeAttrs();
+ return i_(this, true);
+ }
+
+ @Override
+ public I<T> i(String cdata) {
+ return i()._(cdata)._();
+ }
+
+ @Override
+ public I<T> i(String selector, String cdata) {
+ return setSelector(i(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SMALL<I<T>> small() {
+ closeAttrs();
+ return small_(this, true);
+ }
+
+ @Override
+ public I<T> small(String cdata) {
+ return small()._(cdata)._();
+ }
+
+ @Override
+ public I<T> small(String selector, String cdata) {
+ return setSelector(small(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<T> em(String cdata) {
+ return em()._(cdata)._();
+ }
+
+ @Override
+ public EM<I<T>> em() {
+ closeAttrs();
+ return em_(this, true);
+ }
+
+ @Override
+ public I<T> em(String selector, String cdata) {
+ return setSelector(em(), selector)._(cdata)._();
+ }
+
+ @Override
+ public STRONG<I<T>> strong() {
+ closeAttrs();
+ return strong_(this, true);
+ }
+
+ @Override
+ public I<T> strong(String cdata) {
+ return strong()._(cdata)._();
+ }
+
+ @Override
+ public I<T> strong(String selector, String cdata) {
+ return setSelector(strong(), selector)._(cdata)._();
+ }
+
+ @Override
+ public DFN<I<T>> dfn() {
+ closeAttrs();
+ return dfn_(this, true);
+ }
+
+ @Override
+ public I<T> dfn(String cdata) {
+ return dfn()._(cdata)._();
+ }
+
+ @Override
+ public I<T> dfn(String selector, String cdata) {
+ return setSelector(dfn(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CODE<I<T>> code() {
+ closeAttrs();
+ return code_(this, true);
+ }
+
+ @Override
+ public I<T> code(String cdata) {
+ return code()._(cdata)._();
+ }
+
+ @Override
+ public I<T> code(String selector, String cdata) {
+ return setSelector(code(), selector)._(cdata)._();
+ }
+
+ @Override
+ public I<T> samp(String cdata) {
+ return samp()._(cdata)._();
+ }
+
+ @Override
+ public SAMP<I<T>> samp() {
+ closeAttrs();
+ return samp_(this, true);
+ }
+
+ @Override
+ public I<T> samp(String selector, String cdata) {
+ return setSelector(samp(), selector)._(cdata)._();
+ }
+
+ @Override
+ public KBD<I<T>> kbd() {
+ closeAttrs();
+ return kbd_(this, true);
+ }
+
+ @Override
+ public I<T> kbd(String cdata) {
+ return kbd()._(cdata)._();
+ }
+
+ @Override
+ public I<T> kbd(String selector, String cdata) {
+ return setSelector(kbd(), selector)._(cdata)._();
+ }
+
+ @Override
+ public VAR<I<T>> var() {
+ closeAttrs();
+ return var_(this, true);
+ }
+
+ @Override
+ public I<T> var(String cdata) {
+ return var()._(cdata)._();
+ }
+
+ @Override
+ public I<T> var(String selector, String cdata) {
+ return setSelector(var(), selector)._(cdata)._();
+ }
+
+ @Override
+ public CITE<I<T>> cite() {
+ closeAttrs();
+ return cite_(this, true);
+ }
+
+ @Override
+ public I<T> cite(String cdata) {
+ return cite()._(cdata)._();
+ }
+
+ @Override
+ public I<T> cite(String selector, String cdata) {
+ return setSelector(cite(), selector)._(cdata)._();
+ }
+
+ @Override
+ public ABBR<I<T>> abbr() {
+ closeAttrs();
+ return abbr_(this, true);
+ }
+
+ @Override
+ public I<T> abbr(String cdata) {
+ return abbr()._(cdata)._();
+ }
+
+ @Override
+ public I<T> abbr(String selector, String cdata) {
+ return setSelector(abbr(), selector)._(cdata)._();
+ }
+
+ @Override
+ public A<I<T>> a() {
+ closeAttrs();
+ return a_(this, true);
+ }
+
+ @Override
+ public A<I<T>> a(String selector) {
+ return setSelector(a(), selector);
+ }
+
+ @Override
+ public I<T> a(String href, String anchorText) {
+ return a().$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public I<T> a(String selector, String href, String anchorText) {
+ return setSelector(a(), selector).$href(href)._(anchorText)._();
+ }
+
+ @Override
+ public IMG<I<T>> img() {
+ closeAttrs();
+ return img_(this, true);
+ }
+
+ @Override
+ public I<T> img(String src) {
+ return img().$src(src)._();
+ }
+
+ @Override
+ public OBJECT<I<T>> object() {
+ closeAttrs();
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<I<T>> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public SUB<I<T>> sub() {
+ closeAttrs();
+ return sub_(this, true);
+ }
+
+ @Override
+ public I<T> sub(String cdata) {
+ return sub()._(cdata)._();
+ }
+
+ @Override
+ public I<T> sub(String selector, String cdata) {
+ return setSelector(sub(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SUP<I<T>> sup() {
+ closeAttrs();
+ return sup_(this, true);
+ }
+
+ @Override
+ public I<T> sup(String cdata) {
+ return sup()._(cdata)._();
+ }
+
+ @Override
+ public I<T> sup(String selector, String cdata) {
+ return setSelector(sup(), selector)._(cdata)._();
+ }
+
+ @Override
+ public MAP<I<T>> map() {
+ closeAttrs();
+ return map_(this, true);
+ }
+
+ @Override
+ public MAP<I<T>> map(String selector) {
+ return setSelector(map(), selector);
+ }
+
+ @Override
+ public I<T> q(String cdata) {
+ return q()._(cdata)._();
+ }
+
+ @Override
+ public I<T> q(String selector, String cdata) {
+ return setSelector(q(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Q<I<T>> q() {
+ closeAttrs();
+ return q_(this, true);
+ }
+
+ @Override
+ public BR<I<T>> br() {
+ closeAttrs();
+ return br_(this, true);
+ }
+
+ @Override
+ public I<T> br(String selector) {
+ return setSelector(br(), selector)._();
+ }
+
+ @Override
+ public BDO<I<T>> bdo() {
+ closeAttrs();
+ return bdo_(this, true);
+ }
+
+ @Override
+ public I<T> bdo(Dir dir, String cdata) {
+ return bdo().$dir(dir)._(cdata)._();
+ }
+
+ @Override
+ public SPAN<I<T>> span() {
+ closeAttrs();
+ return span_(this, true);
+ }
+
+ @Override
+ public I<T> span(String cdata) {
+ return span()._(cdata)._();
+ }
+
+ @Override
+ public I<T> span(String selector, String cdata) {
+ return setSelector(span(), selector)._(cdata)._();
+ }
+
+ @Override
+ public SCRIPT<I<T>> script() {
+ closeAttrs();
+ return script_(this, true);
+ }
+
+ @Override
+ public I<T> script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public INS<I<T>> ins() {
+ closeAttrs();
+ return ins_(this, true);
+ }
+
+ @Override
+ public I<T> ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<I<T>> del() {
+ closeAttrs();
+ return del_(this, true);
+ }
+
+ @Override
+ public I<T> del(String cdata) {
+ return del()._(cdata)._();
+ }
+
+ @Override
+ public LABEL<I<T>> label() {
+ closeAttrs();
+ return label_(this, true);
+ }
+
+ @Override
+ public I<T> label(String forId, String cdata) {
+ return label().$for(forId)._(cdata)._();
+ }
+
+ @Override
+ public INPUT<I<T>> input(String selector) {
+ return setSelector(input(), selector);
+ }
+
+ @Override
+ public INPUT<I<T>> input() {
+ closeAttrs();
+ return input_(this, true);
+ }
+
+ @Override
+ public SELECT<I<T>> select() {
+ closeAttrs();
+ return select_(this, true);
+ }
+
+ @Override
+ public SELECT<I<T>> select(String selector) {
+ return setSelector(select(), selector);
+ }
+
+ @Override
+ public TEXTAREA<I<T>> textarea(String selector) {
+ return setSelector(textarea(), selector);
+ }
+
+ @Override
+ public TEXTAREA<I<T>> textarea() {
+ closeAttrs();
+ return textarea_(this, true);
+ }
+
+ @Override
+ public I<T> textarea(String selector, String cdata) {
+ return setSelector(textarea(), selector)._(cdata)._();
+ }
+
+ @Override
+ public BUTTON<I<T>> button() {
+ closeAttrs();
+ return button_(this, true);
+ }
+
+ @Override
+ public BUTTON<I<T>> button(String selector) {
+ return setSelector(button(), selector);
+ }
+
+ @Override
+ public I<T> button(String selector, String cdata) {
+ return setSelector(button(), selector)._(cdata)._();
+ }
+ }
+
+ private <T extends _> INPUT<T> input_(T e, boolean inline) {
+ return new INPUT<T>("input", e, opt(false, inline, false)); }
+
+ private <T extends _> SELECT<T> select_(T e, boolean inline) {
+ return new SELECT<T>("select", e, opt(true, inline, false)); }
+
+ private <T extends _> TEXTAREA<T> textarea_(T e, boolean inline) {
+ return new TEXTAREA<T>("textarea", e, opt(true, inline, false)); }
+
+ private <T extends _> BUTTON<T> button_(T e, boolean inline) {
+ return new BUTTON<T>("button", e, opt(true, inline, false)); }
+
+ private <T extends _> LABEL<T> label_(T e, boolean inline) {
+ return new LABEL<T>("label", e, opt(true, inline, false)); }
+
+ private <T extends _> MAP<T> map_(T e, boolean inline) {
+ return new MAP<T>("map", e, opt(true, inline, false)); }
+
+ private <T extends _> Q<T> q_(T e, boolean inline) {
+ return new Q<T>("q", e, opt(true, inline, false)); }
+
+ private <T extends _> BR<T> br_(T e, boolean inline) {
+ return new BR<T>("br", e, opt(false, inline, false)); }
+
+ private <T extends _> BDO<T> bdo_(T e, boolean inline) {
+ return new BDO<T>("bdo", e, opt(true, inline, false)); }
+
+ private <T extends _> SPAN<T> span_(T e, boolean inline) {
+ return new SPAN<T>("span", e, opt(true, inline, false)); }
+
+ private <T extends _> INS<T> ins_(T e, boolean inline) {
+ return new INS<T>("ins", e, opt(true, inline, false)); }
+
+ private <T extends _> DEL<T> del_(T e, boolean inline) {
+ return new DEL<T>("del", e, opt(true, inline, false)); }
+
+ private <T extends _> A<T> a_(T e, boolean inline) {
+ return new A<T>("a", e, opt(true, inline, false)); }
+
+ private <T extends _> SUB<T> sub_(T e, boolean inline) {
+ return new SUB<T>("sub", e, opt(true, inline, false)); }
+
+ private <T extends _> SUP<T> sup_(T e, boolean inline) {
+ return new SUP<T>("sup", e, opt(true, inline, false)); }
+
+ private <T extends _> IMG<T> img_(T e, boolean inline) {
+ return new IMG<T>("img", e, opt(false, inline, false)); }
+
+ private <T extends _> EM<T> em_(T e, boolean inline) {
+ return new EM<T>("em", e, opt(true, inline, false)); }
+
+ private <T extends _> STRONG<T> strong_(T e, boolean inline) {
+ return new STRONG<T>("strong", e, opt(true, inline, false)); }
+
+ private <T extends _> DFN<T> dfn_(T e, boolean inline) {
+ return new DFN<T>("dfn", e, opt(true, inline, false)); }
+
+ private <T extends _> CODE<T> code_(T e, boolean inline) {
+ return new CODE<T>("code", e, opt(true, inline, false)); }
+
+ private <T extends _> SAMP<T> samp_(T e, boolean inline) {
+ return new SAMP<T>("samp", e, opt(true, inline, false)); }
+
+ private <T extends _> KBD<T> kbd_(T e, boolean inline) {
+ return new KBD<T>("kbd", e, opt(true, inline, false)); }
+
+ private <T extends _> VAR<T> var_(T e, boolean inline) {
+ return new VAR<T>("var", e, opt(true, inline, false)); }
+
+ private <T extends _> CITE<T> cite_(T e, boolean inline) {
+ return new CITE<T>("cite", e, opt(true, inline, false)); }
+
+ private <T extends _> ABBR<T> abbr_(T e, boolean inline) {
+ return new ABBR<T>("abbr", e, opt(true, inline, false)); }
+
+ private <T extends _> B<T> b_(T e, boolean inline) {
+ return new B<T>("b", e, opt(true, inline, false)); }
+
+ private <T extends _> I<T> i_(T e, boolean inline) {
+ return new I<T>("i", e, opt(true, inline, false)); }
+
+ private <T extends _> SMALL<T> small_(T e, boolean inline) {
+ return new SMALL<T>("small", e, opt(true, inline, false)); }
+
+ private <T extends _> PRE<T> pre_(T e, boolean inline) {
+ return new PRE<T>("pre", e, opt(true, inline, true)); }
+
+ private <T extends _> UL<T> ul_(T e, boolean inline) {
+ return new UL<T>("ul", e, opt(true, inline, false)); }
+
+ private <T extends _> OL<T> ol_(T e, boolean inline) {
+ return new OL<T>("ol", e, opt(true, inline, false)); }
+
+ private <T extends _> H1<T> h1_(T e, boolean inline) {
+ return new H1<T>("h1", e, opt(true, inline, false)); }
+
+ private <T extends _> H2<T> h2_(T e, boolean inline) {
+ return new H2<T>("h2", e, opt(true, inline, false)); }
+
+ private <T extends _> H3<T> h3_(T e, boolean inline) {
+ return new H3<T>("h3", e, opt(true, inline, false)); }
+
+ private <T extends _> H4<T> h4_(T e, boolean inline) {
+ return new H4<T>("h4", e, opt(true, inline, false)); }
+
+ private <T extends _> H5<T> h5_(T e, boolean inline) {
+ return new H5<T>("h5", e, opt(true, inline, false)); }
+
+ private <T extends _> H6<T> h6_(T e, boolean inline) {
+ return new H6<T>("h6", e, opt(true, inline, false)); }
+
+ private <T extends _> STYLE<T> style_(T e, boolean inline) {
+ return new STYLE<T>("style", e, opt(true, inline, false)); }
+
+ private <T extends _> LINK<T> link_(T e, boolean inline) {
+ return new LINK<T>("link", e, opt(false, inline, false)); }
+
+ private <T extends _> META<T> meta_(T e, boolean inline) {
+ return new META<T>("meta", e, opt(false, inline, false)); }
+
+ private <T extends _> OBJECT<T> object_(T e, boolean inline) {
+ return new OBJECT<T>("object", e, opt(true, inline, false)); }
+
+ private <T extends _> SCRIPT<T> script_(T e, boolean inline) {
+ return new SCRIPT<T>("script", e, opt(true, inline, false)); }
+
+ @Override
+ public HEAD<Hamlet> head() {
+ return head_(this, false);
+ }
+
+ @Override
+ public BODY<Hamlet> body() {
+ return body_(this, false);
+ }
+
+ @Override
+ public BODY<Hamlet> body(String selector) {
+ return setSelector(body(), selector);
+ }
+
+ @Override
+ public BASE<Hamlet> base() {
+ return base_(this, false);
+ }
+
+ @Override
+ public Hamlet base(String href) {
+ return base().$href(href)._();
+ }
+
+ @Override
+ public TITLE<Hamlet> title() {
+ return title_(this, false);
+ }
+
+ @Override
+ public Hamlet title(String cdata) {
+ return title()._(cdata)._();
+ }
+
+ @Override
+ public STYLE<Hamlet> style() {
+ return style_(this, false);
+ }
+
+ @Override
+ public Hamlet style(Object... lines) {
+ return style().$type("text/css")._(lines)._();
+ }
+
+ @Override
+ public LINK<Hamlet> link() {
+ return link_(this, false);
+ }
+
+ @Override
+ public Hamlet link(String href) {
+ return setLinkHref(link(), href)._();
+ }
+
+ @Override
+ public META<Hamlet> meta() {
+ return meta_(this, false);
+ }
+
+ @Override
+ public Hamlet meta(String name, String content) {
+ return meta().$name(name).$content(content)._();
+ }
+
+ @Override
+ public Hamlet meta_http(String header, String content) {
+ return meta().$http_equiv(header).$content(content)._();
+ }
+
+ @Override
+ public SCRIPT<Hamlet> script() {
+ return script_(this, false);
+ }
+
+ @Override
+ public Hamlet script(String src) {
+ return setScriptSrc(script(), src)._();
+ }
+
+ @Override
+ public OBJECT<Hamlet> object() {
+ return object_(this, true);
+ }
+
+ @Override
+ public OBJECT<Hamlet> object(String selector) {
+ return setSelector(object(), selector);
+ }
+
+ @Override
+ public TABLE<Hamlet> table() {
+ return table_(this, false);
+ }
+
+ @Override
+ public TABLE<Hamlet> table(String selector) {
+ return setSelector(table(), selector);
+ }
+
+ @Override
+ public Hamlet address(String cdata) {
+ return address()._(cdata)._();
+ }
+
+ @Override
+ public ADDRESS<Hamlet> address() {
+ return address_(this, false);
+ }
+
+ @Override
+ public P<Hamlet> p(String selector) {
+ return setSelector(p(), selector);
+ }
+
+ @Override
+ public P<Hamlet> p() {
+ return p_(this, false);
+ }
+
+ @Override
+ public Hamlet _(Class<? extends SubView> cls) {
+ subView(cls);
+ return this;
+ }
+
+ @Override
+ public HR<Hamlet> hr() {
+ return hr_(this, false);
+ }
+
+ @Override
+ public Hamlet hr(String selector) {
+ return setSelector(hr(), selector)._();
+ }
+
+ @Override
+ public DL<Hamlet> dl(String selector) {
+ return setSelector(dl(), selector);
+ }
+
+ @Override
+ public DL<Hamlet> dl() {
+ return dl_(this, false);
+ }
+
+ @Override
+ public DIV<Hamlet> div(String selector) {
+ return setSelector(div(), selector);
+ }
+
+ @Override
+ public DIV<Hamlet> div() {
+ return div_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<Hamlet> blockquote() {
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public BLOCKQUOTE<Hamlet> bq() {
+ return blockquote_(this, false);
+ }
+
+ @Override
+ public Hamlet h1(String cdata) {
+ return h1()._(cdata)._();
+ }
+
+ @Override
+ public H1<Hamlet> h1() {
+ return h1_(this, false);
+ }
+
+ @Override
+ public Hamlet h1(String selector, String cdata) {
+ return setSelector(h1(), selector)._(cdata)._();
+ }
+
+ @Override
+ public Hamlet h2(String cdata) {
+ return h2()._(cdata)._();
+ }
+
+ @Override
+ public H2<Hamlet> h2() {
+ return h2_(this, false);
+ }
+
+ @Override
+ public Hamlet h2(String selector, String cdata) {
+ return setSelector(h2(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H3<Hamlet> h3() {
+ return h3_(this, false);
+ }
+
+ @Override
+ public Hamlet h3(String cdata) {
+ return h3()._(cdata)._();
+ }
+
+ @Override
+ public Hamlet h3(String selector, String cdata) {
+ return setSelector(h3(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H4<Hamlet> h4() {
+ return h4_(this, false);
+ }
+
+ @Override
+ public Hamlet h4(String cdata) {
+ return h4()._(cdata)._();
+ }
+
+ @Override
+ public Hamlet h4(String selector, String cdata) {
+ return setSelector(h4(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H5<Hamlet> h5() {
+ return h5_(this, false);
+ }
+
+ @Override
+ public Hamlet h5(String cdata) {
+ return h5()._(cdata)._();
+ }
+
+ @Override
+ public Hamlet h5(String selector, String cdata) {
+ return setSelector(h5(), selector)._(cdata)._();
+ }
+
+ @Override
+ public H6<Hamlet> h6() {
+ return h6_(this, false);
+ }
+
+ @Override
+ public Hamlet h6(String cdata) {
+ return h6()._(cdata)._();
+ }
+
+ @Override
+ public Hamlet h6(String selector, String cdata) {
+ return setSelector(h6(), selector)._(cdata)._();
+ }
+
+ @Override
+ public UL<Hamlet> ul() {
+ return ul_(this, false);
+ }
+
+ @Override
+ public UL<Hamlet> ul(String selector) {
+ return setSelector(ul(), selector);
+ }
+
+ @Override
+ public OL<Hamlet> ol() {
+ return ol_(this, false);
+ }
+
+ @Override
+ public OL<Hamlet> ol(String selector) {
+ return setSelector(ol(), selector);
+ }
+
+ @Override
+ public PRE<Hamlet> pre() {
+ return pre_(this, false);
+ }
+
+ @Override
+ public PRE<Hamlet> pre(String selector) {
+ return setSelector(pre(), selector);
+ }
+
+ @Override
+ public FORM<Hamlet> form() {
+ return form_(this, false);
+ }
+
+ @Override
+ public FORM<Hamlet> form(String selector) {
+ return setSelector(form(), selector);
+ }
+
+ @Override
+ public FIELDSET<Hamlet> fieldset() {
+ return fieldset_(this, false);
+ }
+
+ @Override
+ public FIELDSET<Hamlet> fieldset(String selector) {
+ return setSelector(fieldset(), selector);
+ }
+
+ @Override
+ public INS<Hamlet> ins() {
+ return ins_(this, false);
+ }
+
+ @Override
+ public Hamlet ins(String cdata) {
+ return ins()._(cdata)._();
+ }
+
+ @Override
+ public DEL<Hamlet> del() {
+ return del_(this, false);
+ }
+
+ @Override
+ public Hamlet del(String cdata) {
+ return del()._(cdata)._();
+ }
+}
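
For orientation, the file above is the generated fluent builder the rest of this patch produces and consumes. A minimal usage sketch (illustrative only, not part of the patch; it relies on the generated Hamlet(PrintWriter, int, boolean) constructor and the shortcut methods defined above):

    import java.io.PrintWriter;
    import java.io.StringWriter;

    // Hypothetical driver exercising the fluent API.
    StringWriter buf = new StringWriter();
    Hamlet h = new Hamlet(new PrintWriter(buf), 0, false);
    h.
      title("test").                        // title(cdata) shortcut
      h1("heading 1").                      // h1(cdata) shortcut
      p("#id.class").                       // selector => id="id" class="class"
        b("hello").                         // nested inline shortcut
        em("world!")._().                   // _() closes the P element
      div("#footer").
        _("Brought to you by").             // _() adds HTML-escaped pcdata
        a("http://hostname/", "Somebody")._();
    System.out.println(buf.toString());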
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java
new file mode 100644
index 0000000..e4d8dbcc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java
@@ -0,0 +1,447 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.hamlet;
+
+import com.google.common.collect.Sets;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Method;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.Locale;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.yarn.webapp.WebAppException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Generates a specific hamlet implementation class from a spec class
+ * using a generic hamlet implementation class.
+ */
+public class HamletGen {
+ static final Logger LOG = LoggerFactory.getLogger(HamletGen.class);
+ static final Options opts = new Options();
+ static {
+ opts.addOption("h", "help", false, "Print this help message").
+ addOption("s", "spec-class", true,
+ "The class that holds the spec interfaces. e.g. HamletSpec").
+ addOption("i", "impl-class", true,
+ "An implementation class. e.g. HamletImpl").
+ addOption("o", "output-class", true, "Output class name").
+ addOption("p", "output-package", true, "Output package name");
+ };
+
+ static final Pattern elementRegex = Pattern.compile("^[A-Z][A-Z0-9]*$");
+
+ int bytes = 0;
+ PrintWriter out;
+ final Set<String> endTagOptional = Sets.newHashSet();
+ final Set<String> inlineElements = Sets.newHashSet();
+ Class<?> top; // html top-level interface
+ String hamlet; // output class simple name
+ boolean topMode;
+
+ /**
+ * Generate a specific Hamlet implementation from a spec.
+ * @param specClass holds hamlet interfaces. e.g. {@link HamletSpec}
+ * @param implClass a generic hamlet implementation. e.g. {@link HamletImpl}
+ * @param outputName name of the output class. e.g. {@link Hamlet}
+ * @param outputPkg package name of the output class.
+ * @throws IOException
+ */
+ public void generate(Class<?> specClass, Class<?> implClass,
+ String outputName, String outputPkg) throws IOException {
+ LOG.info("Generating {} using {} and {}", new Object[]{outputName,
+ specClass, implClass});
+ out = new PrintWriter(outputName +".java", "UTF-8");
+ hamlet = basename(outputName);
+ String pkg = pkgName(outputPkg, implClass.getPackage().getName());
+ puts(0, "// Generated by HamletGen. Do NOT edit!\n",
+ "package ", pkg, ";\n",
+ "import java.io.PrintWriter;\n",
+ "import java.util.EnumSet;\n",
+ "import static java.util.EnumSet.*;\n",
+ "import static ", implClass.getName(), ".EOpt.*;\n",
+ "import org.apache.hadoop.yarn.webapp.SubView;");
+ String implClassName = implClass.getSimpleName();
+ if (!implClass.getPackage().getName().equals(pkg)) {
+ puts(0, "import ", implClass.getName(), ';');
+ }
+ puts(0, "\n",
+ "public class ", hamlet, " extends ", implClassName,
+ " implements ", specClass.getSimpleName(), "._Html {\n",
+ " public ", hamlet, "(PrintWriter out, int nestLevel,",
+ " boolean wasInline) {\n",
+ " super(out, nestLevel, wasInline);\n",
+ " }\n\n", // inline is context sensitive
+ " static EnumSet<EOpt> opt(boolean endTag, boolean inline, ",
+ "boolean pre) {\n",
+ " EnumSet<EOpt> opts = of(ENDTAG);\n",
+ " if (!endTag) opts.remove(ENDTAG);\n",
+ " if (inline) opts.add(INLINE);\n",
+ " if (pre) opts.add(PRE);\n",
+ " return opts;\n",
+ " }");
+ initLut(specClass);
+ genImpl(specClass, implClassName, 1);
+ LOG.info("Generating {} methods", hamlet);
+ genMethods(hamlet, top, 1);
+ puts(0, "}");
+ out.close();
+ LOG.info("Wrote {} bytes to {}.java", bytes, outputName);
+ }
+
+ String basename(String path) {
+ return path.substring(path.lastIndexOf('/') + 1);
+ }
+
+ String pkgName(String pkg, String defaultPkg) {
+ if (pkg == null || pkg.isEmpty()) return defaultPkg;
+ return pkg;
+ }
+
+ void initLut(Class<?> spec) {
+ endTagOptional.clear();
+ inlineElements.clear();
+ for (Class<?> cls : spec.getClasses()) {
+ Annotation a = cls.getAnnotation(HamletSpec.Element.class);
+ if (a != null && !((HamletSpec.Element) a).endTag()) {
+ endTagOptional.add(cls.getSimpleName());
+ }
+ if (cls.getSimpleName().equals("Inline")) {
+ for (Method method : cls.getMethods()) {
+ String retName = method.getReturnType().getSimpleName();
+ if (isElement(retName)) {
+ inlineElements.add(retName);
+ }
+ }
+ }
+ }
+ }
+
+ void genImpl(Class<?> spec, String implClassName, int indent) {
+ String specName = spec.getSimpleName();
+ for (Class<?> cls : spec.getClasses()) {
+ String className = cls.getSimpleName();
+ if (cls.isInterface()) {
+ genFactoryMethods(cls, indent);
+ }
+ if (isElement(className)) {
+ LOG.info("Generating class {}<T>", className);
+ puts(indent, "\n",
+ "public class ", className, "<T extends _>",
+ " extends EImp<T> implements ", specName, ".", className, " {\n",
+ " public ", className, "(String name, T parent,",
+ " EnumSet<EOpt> opts) {\n",
+ " super(name, parent, opts);\n",
+ " }");
+ genMethods(className, cls, indent + 1);
+ puts(indent, "}");
+ } else if (className.equals("_Html")) {
+ top = cls;
+ }
+ }
+ }
+
+ void genFactoryMethods(Class<?> cls, int indent) {
+ for (Method method : cls.getDeclaredMethods()) {
+ String retName = method.getReturnType().getSimpleName();
+ String methodName = method.getName();
+ if (methodName.charAt(0) == '$') continue;
+ if (isElement(retName) && method.getParameterTypes().length == 0) {
+ genFactoryMethod(retName, methodName, indent);
+ }
+ }
+ }
+
+ void genMethods(String className, Class<?> cls, int indent) {
+ topMode = (top != null && cls.equals(top));
+ for (Method method : cls.getMethods()) {
+ String retName = method.getReturnType().getSimpleName();
+ if (method.getName().charAt(0) == '$') {
+ genAttributeMethod(className, method, indent);
+ } else if (isElement(retName)) {
+ genNewElementMethod(className, method, indent);
+ } else {
+ genCurElementMethod(className, method, indent);
+ }
+ }
+ }
+
+ void genAttributeMethod(String className, Method method, int indent) {
+ String methodName = method.getName();
+ String attrName = methodName.substring(1).replace('_', '-');
+ Type[] params = method.getGenericParameterTypes();
+ echo(indent, "\n",
+ "@Override\n",
+ "public ", className, topMode ? " " : "<T> ", methodName, "(");
+ if (params.length == 0) {
+ puts(0, ") {");
+ puts(indent,
+ " addAttr(\"", attrName, "\", null);\n",
+ " return this;\n", "}");
+ } else if (params.length == 1) {
+ String typeName = getTypeName(params[0]);
+ puts(0, typeName, " value) {");
+ if (typeName.equals("EnumSet<LinkType>")) {
+ puts(indent,
+ " addRelAttr(\"", attrName, "\", value);\n",
+ " return this;\n", "}");
+ } else if (typeName.equals("EnumSet<Media>")) {
+ puts(indent,
+ " addMediaAttr(\"", attrName, "\", value);\n",
+ " return this;\n", "}");
+ } else {
+ puts(indent,
+ " addAttr(\"", attrName, "\", value);\n",
+ " return this;\n", "}");
+ }
+ } else {
+ throwUnhandled(className, method);
+ }
+ }
+
+ String getTypeName(Type type) {
+ if (type instanceof Class<?>) {
+ return ((Class<?>)type).getSimpleName();
+ }
+ ParameterizedType pt = (ParameterizedType) type;
+ return ((Class<?>)pt.getRawType()).getSimpleName() +"<"+
+ ((Class<?>)pt.getActualTypeArguments()[0]).getSimpleName() +">";
+ }
+
+ void genFactoryMethod(String retName, String methodName, int indent) {
+ puts(indent, "\n",
+ "private <T extends _> ", retName, "<T> ", methodName,
+ "_(T e, boolean inline) {\n",
+ " return new ", retName, "<T>(\"", retName.toLowerCase(Locale.US),
+ "\", e, opt(", !endTagOptional.contains(retName), ", inline, ",
+ retName.equals("PRE"), ")); }");
+ }
+
+ void genNewElementMethod(String className, Method method, int indent) {
+ String methodName = method.getName();
+ String retName = method.getReturnType().getSimpleName();
+ Class<?>[] params = method.getParameterTypes();
+ echo(indent, "\n",
+ "@Override\n",
+ "public ", retName, "<", className, topMode ? "> " : "<T>> ",
+ methodName, "(");
+ if (params.length == 0) {
+ puts(0, ") {");
+ puts(indent,
+ topMode ? "" : " closeAttrs();\n",
+ " return ", retName.toLowerCase(Locale.US), "_(this, ",
+ isInline(className, retName), ");\n", "}");
+ } else if (params.length == 1) {
+ puts(0, "String selector) {");
+ puts(indent,
+ " return setSelector(", methodName, "(), selector);\n", "}");
+ } else {
+ throwUnhandled(className, method);
+ }
+ }
+
+ boolean isInline(String container, String className) {
+ if ((container.equals("BODY") || container.equals(hamlet) ||
+ container.equals("HEAD") || container.equals("HTML")) &&
+ (className.equals("INS") || className.equals("DEL") ||
+ className.equals("SCRIPT"))) {
+ return false;
+ }
+ return inlineElements.contains(className);
+ }
+
+ void genCurElementMethod(String className, Method method, int indent) {
+ String methodName = method.getName();
+ Class<?>[] params = method.getParameterTypes();
+ if (topMode || params.length > 0) {
+ echo(indent, "\n",
+ "@Override\n",
+ "public ", className, topMode ? " " : "<T> ", methodName, "(");
+ }
+ if (params.length == 0) {
+ if (topMode) {
+ puts(0, ") {");
+ puts(indent, " return this;\n", "}");
+ }
+ } else if (params.length == 1) {
+ if (methodName.equals("base")) {
+ puts(0, "String href) {");
+ puts(indent,
+ " return base().$href(href)._();\n", "}");
+ } else if (methodName.equals("script")) {
+ puts(0, "String src) {");
+ puts(indent,
+ " return setScriptSrc(script(), src)._();\n", "}");
+ } else if (methodName.equals("style")) {
+ puts(0, "Object... lines) {");
+ puts(indent,
+ " return style().$type(\"text/css\")._(lines)._();\n", "}");
+ } else if (methodName.equals("img")) {
+ puts(0, "String src) {");
+ puts(indent,
+ " return ", methodName, "().$src(src)._();\n", "}");
+ } else if (methodName.equals("br") || methodName.equals("hr") ||
+ methodName.equals("col")) {
+ puts(0, "String selector) {");
+ puts(indent,
+ " return setSelector(", methodName, "(), selector)._();\n", "}");
+ } else if (methodName.equals("link")) {
+ puts(0, "String href) {");
+ puts(indent,
+ " return setLinkHref(", methodName, "(), href)._();\n", "}");
+ } else if (methodName.equals("_")) {
+ if (params[0].getSimpleName().equals("Class")) {
+ puts(0, "Class<? extends SubView> cls) {");
+ puts(indent,
+ " ", topMode ? "subView" : "_v", "(cls);\n",
+ " return this;\n", "}");
+ } else {
+ puts(0, "Object... lines) {");
+ puts(indent,
+ " _p(", needsEscaping(className), ", lines);\n",
+ " return this;\n", "}");
+ }
+ } else if (methodName.equals("_r")) {
+ puts(0, "Object... lines) {");
+ puts(indent,
+ " _p(false, lines);\n",
+ " return this;\n", "}");
+ } else {
+ puts(0, "String cdata) {");
+ puts(indent,
+ " return ", methodName, "()._(cdata)._();\n", "}");
+ }
+ } else if (params.length == 2) {
+ if (methodName.equals("meta")) {
+ puts(0, "String name, String content) {");
+ puts(indent,
+ " return meta().$name(name).$content(content)._();\n", "}");
+ } else if (methodName.equals("meta_http")) {
+ puts(0, "String header, String content) {");
+ puts(indent,
+ " return meta().$http_equiv(header).$content(content)._();\n",
+ "}");
+ } else if (methodName.equals("a")) {
+ puts(0, "String href, String anchorText) {");
+ puts(indent,
+ " return a().$href(href)._(anchorText)._();\n", "}");
+ } else if (methodName.equals("bdo")) {
+ puts(0, "Dir dir, String cdata) {");
+ puts(indent, " return bdo().$dir(dir)._(cdata)._();\n", "}");
+ } else if (methodName.equals("label")) {
+ puts(0, "String forId, String cdata) {");
+ puts(indent, " return label().$for(forId)._(cdata)._();\n", "}");
+ } else if (methodName.equals("param")) {
+ puts(0, "String name, String value) {");
+ puts(indent,
+ " return param().$name(name).$value(value)._();\n", "}");
+ } else {
+ puts(0, "String selector, String cdata) {");
+ puts(indent,
+ " return setSelector(", methodName,
+ "(), selector)._(cdata)._();\n", "}");
+ }
+ } else if (params.length == 3) {
+ if (methodName.equals("a")) {
+ puts(0, "String selector, String href, String anchorText) {");
+ puts(indent,
+ " return setSelector(a(), selector)",
+ ".$href(href)._(anchorText)._();\n", "}");
+ }
+ } else {
+ throwUnhandled(className, method);
+ }
+ }
+
+ static boolean needsEscaping(String eleName) {
+ return !eleName.equals("SCRIPT") && !eleName.equals("STYLE");
+ }
+
+ static void throwUnhandled(String className, Method method) {
+ throw new WebAppException("Unhandled " + className + "#" + method);
+ }
+
+ void echo(int indent, Object... args) {
+ String prev = null;
+ for (Object o : args) {
+ String s = String.valueOf(o);
+ if (!s.isEmpty() && !s.equals("\n") &&
+ (prev == null || prev.endsWith("\n"))) {
+ indent(indent);
+ }
+ prev = s;
+ out.print(s);
+ bytes += s.length();
+ }
+ }
+
+ void indent(int indent) {
+ for (int i = 0; i < indent; ++i) {
+ out.print(" ");
+ bytes += 2;
+ }
+ }
+
+ void puts(int indent, Object... args) {
+ echo(indent, args);
+ out.println();
+ ++bytes;
+ }
+
+ boolean isElement(String s) {
+ return elementRegex.matcher(s).matches();
+ }
+
+ public static void main(String[] args) throws Exception {
+ CommandLine cmd = new GnuParser().parse(opts, args);
+ if (cmd.hasOption("help")) {
+ new HelpFormatter().printHelp("Usage: hbgen [OPTIONS]", opts);
+ return;
+ }
+ // defaults
+ Class<?> specClass = HamletSpec.class;
+ Class<?> implClass = HamletImpl.class;
+ String outputClass = "HamletTmp";
+ String outputPackage = implClass.getPackage().getName();
+ if (cmd.hasOption("spec-class")) {
+ specClass = Class.forName(cmd.getOptionValue("spec-class"));
+ }
+ if (cmd.hasOption("impl-class")) {
+ implClass = Class.forName(cmd.getOptionValue("impl-class"));
+ }
+ if (cmd.hasOption("output-class")) {
+ outputClass = cmd.getOptionValue("output-class");
+ }
+ if (cmd.hasOption("output-package")) {
+ outputPackage = cmd.getOptionValue("output-package");
+ }
+ new HamletGen().generate(specClass, implClass, outputClass, outputPackage);
+ }
+}
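
As a hedged sketch of how this generator is driven: main() above wires the -s/-i/-o/-p options into generate(), so the programmatic equivalent of regenerating the builder in this package would be roughly (output class name and target package are illustrative):

    // Roughly equivalent to:
    //   java ...HamletGen -o Hamlet -p org.apache.hadoop.yarn.webapp.hamlet
    // generate() writes Hamlet.java (UTF-8) into the working directory
    // and throws IOException on output errors.
    new HamletGen().generate(HamletSpec.class, HamletImpl.class,
        "Hamlet", "org.apache.hadoop.yarn.webapp.hamlet");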
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
new file mode 100644
index 0000000..126841b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
@@ -0,0 +1,385 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.hamlet;
+
+import com.google.common.base.Joiner;
+import static com.google.common.base.Preconditions.*;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+
+import java.io.PrintWriter;
+import java.util.EnumSet;
+import static java.util.EnumSet.*;
+import java.util.Iterator;
+
+import static org.apache.commons.lang.StringEscapeUtils.*;
+import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EOpt.*;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.WebAppException;
+
+
+/**
+ * A simple unbuffered generic hamlet implementation.
+ *
+ * Zero-copy, but allocates on every element; this could be
+ * optimized with a thread-local element pool.
+ *
+ * Prints HTML as the tree is built, so element order matters.
+ */
+@InterfaceAudience.Private
+public class HamletImpl extends HamletSpec {
+ private static final String INDENT_CHARS = " ";
+ private static final Splitter SS = Splitter.on('.').
+ omitEmptyStrings().trimResults();
+ private static final Joiner SJ = Joiner.on(' ');
+ private static final Joiner CJ = Joiner.on(", ");
+ static final int S_ID = 0;
+ static final int S_CLASS = 1;
+
+ int nestLevel;
+ int indents; // number of indent() calls; mostly for testing.
+ private final PrintWriter out;
+ private final StringBuilder sb = new StringBuilder(); // not shared
+ private boolean wasInline = false;
+
+ /**
+ * Element options. (whether it needs end tag, is inline etc.)
+ */
+ public enum EOpt {
+ /** needs end(close) tag */
+ ENDTAG,
+ /** The content is inline */
+ INLINE,
+ /** The content is preformatted */
+ PRE
+ };
+
+ /**
+ * The base class for elements
+ * @param <T> type of the parent (containing) element for the element
+ */
+ public class EImp<T extends _> implements _Child {
+ private final String name;
+ private final T parent; // shortcut for parent element
+ private final EnumSet<EOpt> opts; // element options
+
+ private boolean started = false;
+ private boolean attrsClosed = false;
+
+ EImp(String name, T parent, EnumSet<EOpt> opts) {
+ this.name = name;
+ this.parent = parent;
+ this.opts = opts;
+ }
+
+ @Override
+ public T _() {
+ closeAttrs();
+ --nestLevel;
+ printEndTag(name, opts);
+ return parent;
+ }
+
+ protected void _p(boolean quote, Object... args) {
+ closeAttrs();
+ for (Object s : args) {
+ if (!opts.contains(PRE)) {
+ indent(opts);
+ }
+ out.print(quote ? escapeHtml(String.valueOf(s))
+ : String.valueOf(s));
+ if (!opts.contains(INLINE) && !opts.contains(PRE)) {
+ out.println();
+ }
+ }
+ }
+
+ protected void _v(Class<? extends SubView> cls) {
+ closeAttrs();
+ subView(cls);
+ }
+
+ protected void closeAttrs() {
+ if (!attrsClosed) {
+ startIfNeeded();
+ ++nestLevel;
+ out.print('>');
+ if (!opts.contains(INLINE) && !opts.contains(PRE)) {
+ out.println();
+ }
+ attrsClosed = true;
+ }
+ }
+
+ protected void addAttr(String name, String value) {
+ checkState(!attrsClosed, "attribute added after content");
+ startIfNeeded();
+ printAttr(name, value);
+ }
+
+ protected void addAttr(String name, Object value) {
+ addAttr(name, String.valueOf(value));
+ }
+
+ protected void addMediaAttr(String name, EnumSet<Media> media) {
+ // 6.13 comma-separated list
+ addAttr(name, CJ.join(media));
+ }
+
+ protected void addRelAttr(String name, EnumSet<LinkType> types) {
+ // 6.12 space-separated list
+ addAttr(name, SJ.join(types));
+ }
+
+ private void startIfNeeded() {
+ if (!started) {
+ printStartTag(name, opts);
+ started = true;
+ }
+ }
+
+ protected void _inline(boolean choice) {
+ if (choice) {
+ opts.add(INLINE);
+ } else {
+ opts.remove(INLINE);
+ }
+ }
+
+ protected void _endTag(boolean choice) {
+ if (choice) {
+ opts.add(ENDTAG);
+ } else {
+ opts.remove(ENDTAG);
+ }
+ }
+
+ protected void _pre(boolean choice) {
+ if (choice) {
+ opts.add(PRE);
+ } else {
+ opts.remove(PRE);
+ }
+ }
+ }
+
+ public class Generic<T extends _> extends EImp<T> implements PCData {
+ Generic(String name, T parent, EnumSet<EOpt> opts) {
+ super(name, parent, opts);
+ }
+
+ public Generic<T> _inline() {
+ super._inline(true);
+ return this;
+ }
+
+ public Generic<T> _noEndTag() {
+ super._endTag(false);
+ return this;
+ }
+
+ public Generic<T> _pre() {
+ super._pre(true);
+ return this;
+ }
+
+ public Generic<T> _attr(String name, String value) {
+ addAttr(name, value);
+ return this;
+ }
+
+ public Generic<Generic<T>> _elem(String name, EnumSet<EOpt> opts) {
+ closeAttrs();
+ return new Generic<Generic<T>>(name, this, opts);
+ }
+
+ public Generic<Generic<T>> elem(String name) {
+ return _elem(name, of(ENDTAG));
+ }
+
+ @Override
+ public Generic<T> _(Object... lines) {
+ _p(true, lines);
+ return this;
+ }
+
+ @Override
+ public Generic<T> _r(Object... lines) {
+ _p(false, lines);
+ return this;
+ }
+ }
+
+ public HamletImpl(PrintWriter out, int nestLevel, boolean wasInline) {
+ this.out = out;
+ this.nestLevel = nestLevel;
+ this.wasInline = wasInline;
+ }
+
+ public int nestLevel() {
+ return nestLevel;
+ }
+
+ public boolean wasInline() {
+ return wasInline;
+ }
+
+ public void setWasInline(boolean state) {
+ wasInline = state;
+ }
+
+ public PrintWriter getWriter() {
+ return out;
+ }
+
+ /**
+ * Create a root-level generic element.
+ * Mostly for testing purposes.
+ * @param <T> type of the parent element
+ * @param name of the element
+ * @param opts {@link EOpt element options}
+ * @return the element
+ */
+ public <T extends _>
+ Generic<T> root(String name, EnumSet<EOpt> opts) {
+ return new Generic<T>(name, null, opts);
+ }
+
+ public <T extends _> Generic<T> root(String name) {
+ return root(name, of(ENDTAG));
+ }
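
A testing-style sketch of root() and the Generic builder above (illustrative; element names and the writer setup are arbitrary):

    // Free-standing element tree; markup is printed as it is built.
    HamletImpl hi = new HamletImpl(new PrintWriter(System.out, true), 0, false);
    hi.root("start")._attr("name", "value").
        _("start text").                    // escaped pcdata
        elem("sub")._("sub text")._().      // nested generic element
        elem("br")._noEndTag()._()._();     // end tag suppressed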
+
+ protected void printStartTag(String name, EnumSet<EOpt> opts) {
+ indent(opts);
+ sb.setLength(0);
+ out.print(sb.append('<').append(name).toString()); // for easier mock test
+ }
+
+ protected void indent(EnumSet<EOpt> opts) {
+ if (opts.contains(INLINE) && wasInline) {
+ return;
+ }
+ if (wasInline) {
+ out.println();
+ }
+ wasInline = opts.contains(INLINE) || opts.contains(PRE);
+ for (int i = 0; i < nestLevel; ++i) {
+ out.print(INDENT_CHARS);
+ }
+ ++indents;
+ }
+
+ protected void printEndTag(String name, EnumSet<EOpt> opts) {
+ if (!opts.contains(ENDTAG)) {
+ return;
+ }
+ if (!opts.contains(PRE)) {
+ indent(opts);
+ } else {
+ wasInline = opts.contains(INLINE);
+ }
+ sb.setLength(0);
+ out.print(sb.append("</").append(name).append('>').toString()); // ditto
+ if (!opts.contains(INLINE)) {
+ out.println();
+ }
+ }
+
+ protected void printAttr(String name, String value) {
+ sb.setLength(0);
+ sb.append(' ').append(name);
+ if (value != null) {
+ sb.append("=\"").append(value).append("\"");
+ }
+ out.print(sb.toString());
+ }
+
+ /**
+ * Sub-classes should override this to do something interesting.
+ * @param cls the sub-view class
+ */
+ protected void subView(Class<? extends SubView> cls) {
+ indent(of(ENDTAG)); // not an inline view
+ sb.setLength(0);
+ out.print(sb.append('[').append(cls.getName()).append(']').toString());
+ out.println();
+ }
+
+ /**
+ * Parse selector into id and classes
+ * @param selector in the form of (#id)?(.class)*
+ * @return a two-element array [id, "space-separated classes"].
+ * Either element could be null.
+ * @throws WebAppException when both are null or the selector is malformed.
+ */
+ public static String[] parseSelector(String selector) {
+ String[] result = new String[]{null, null};
+ Iterable<String> rs = SS.split(selector);
+ Iterator<String> it = rs.iterator();
+ if (it.hasNext()) {
+ String maybeId = it.next();
+ if (maybeId.charAt(0) == '#') {
+ result[S_ID] = maybeId.substring(1);
+ if (it.hasNext()) {
+ result[S_CLASS] = SJ.join(Iterables.skip(rs, 1));
+ }
+ } else {
+ result[S_CLASS] = SJ.join(rs);
+ }
+ return result;
+ }
+ throw new WebAppException("Error parsing selector: "+ selector);
+ }
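
Illustrative expectations for the grammar documented above:

    String[] r1 = HamletImpl.parseSelector("#content.wrapped.fixed");
    // r1[S_ID] == "content", r1[S_CLASS] == "wrapped fixed"
    String[] r2 = HamletImpl.parseSelector(".small");
    // r2[S_ID] == null,      r2[S_CLASS] == "small"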
+
+ /**
+ * Set id and/or class attributes for an element.
+ * @param <E> type of the element
+ * @param e the element
+ * @param selector Haml form of "(#id)?(.class)*"
+ * @return the element
+ */
+ public static <E extends CoreAttrs> E setSelector(E e, String selector) {
+ String[] res = parseSelector(selector);
+ if (res[S_ID] != null) {
+ e.$id(res[S_ID]);
+ }
+ if (res[S_CLASS] != null) {
+ e.$class(res[S_CLASS]);
+ }
+ return e;
+ }
+
+ public static <E extends LINK> E setLinkHref(E e, String href) {
+ if (href.endsWith(".css")) {
+ e.$rel("stylesheet"); // required in html5
+ }
+ e.$href(href);
+ return e;
+ }
+
+ public static <E extends SCRIPT> E setScriptSrc(E e, String src) {
+ if (src.endsWith(".js")) {
+ e.$type("text/javascript"); // required in html4
+ }
+ e.$src(src);
+ return e;
+ }
+}
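
The two suffix-sniffing helpers at the end of this file back the link(String) and script(String) shortcuts in the generated Hamlet class; a hedged sketch (the paths are illustrative):

    Hamlet h = new Hamlet(new PrintWriter(System.out, true), 0, false);
    h.link("static/yarn.css")       // ".css" => $rel("stylesheet") added
     .script("static/yarn.js");     // ".js"  => $type("text/javascript") added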
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java
new file mode 100644
index 0000000..f5b72bc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java
@@ -0,0 +1,3099 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.hamlet;
+
+import java.lang.annotation.*;
+import java.util.EnumSet;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+
+/**
+ * HTML5 compatible HTML4 builder interfaces.
+ *
+ * <p>Generated from HTML 4.01 strict DTD and HTML5 diffs.
+ * <br>cf. http://www.w3.org/TR/html4/
+ * <br>cf. http://www.w3.org/TR/html5-diff/
+ * <p>Attributes and elements present in the 4.01 DTD are omitted where
+ * needed for HTML5 compatibility.
+ *
+ * <p>Note, the common argument selector uses the same syntax as Haml/Sass:
+ * <pre> selector ::= (#id)?(.class)*</pre>
+ * cf. http://haml-lang.com/
+ *
+ * <p>The naming convention used in this class is slightly different from
+ * normal classes. A CamelCase interface corresponds to an entity in the DTD.
+ * _CamelCase is for internal refactoring. An element builder interface is in
+ * UPPERCASE, corresponding to an element definition in the DTD. $lowercase
+ * is used for attribute builder methods, to differentiate them from element
+ * builder methods.
+ */
+public class HamletSpec {
+ // The enum values are lowercase for better compression,
+ // while avoiding runtime conversion.
+ // cf. http://www.w3.org/Protocols/HTTP/Performance/Compression/HTMLCanon.html
+ // http://www.websiteoptimization.com/speed/tweak/lowercase/
+ /** %Shape (case-insensitive) */
+ public enum Shape {
+ /**
+ * rectangle
+ */
+ rect,
+ /**
+ * circle
+ */
+ circle,
+ /**
+ * polygon
+ */
+ poly,
+ /**
+ * default
+ */
+ Default
+ };
+
+ /** Values for the %i18n dir attribute (case-insensitive) */
+ public enum Dir {
+ /**
+ * left to right
+ */
+ ltr,
+ /**
+ * right to left
+ */
+ rtl
+ };
+
+ /** %MediaDesc (case-sensitive) */
+ public enum Media {
+ /**
+ * computer screen
+ */
+ screen,
+ /**
+ * teletype/terminal
+ */
+ tty,
+ /**
+ * television
+ */
+ tv,
+ /**
+ * projection
+ */
+ projection,
+ /**
+ * mobile device
+ */
+ handheld,
+ /**
+ * print media
+ */
+ print,
+ /**
+ * braille
+ */
+ braille,
+ /**
+ * aural
+ */
+ aural,
+ /**
+ * suitable for all media
+ */
+ all
+ };
+
+ /** %LinkTypes (case-insensitive) */
+ public enum LinkType {
+ /**
+ *
+ */
+ alternate,
+ /**
+ *
+ */
+ stylesheet,
+ /**
+ *
+ */
+ start,
+ /**
+ *
+ */
+ next,
+ /**
+ *
+ */
+ prev,
+ /**
+ *
+ */
+ contents,
+ /**
+ *
+ */
+ index,
+ /**
+ *
+ */
+ glossary,
+ /**
+ *
+ */
+ copyright,
+ /**
+ *
+ */
+ chapter,
+ /**
+ *
+ */
+ section,
+ /**
+ *
+ */
+ subsection,
+ /**
+ *
+ */
+ appendix,
+ /**
+ *
+ */
+ help,
+ /**
+ *
+ */
+ bookmark
+ };
+
+ /** Values for form methods (case-insensitive) */
+ public enum Method {
+ /**
+ * HTTP GET
+ */
+ get,
+ /**
+ * HTTP POST
+ */
+ post
+ };
+
+ /** %InputType (case-insensitive) */
+ public enum InputType {
+ /**
+ *
+ */
+ text,
+ /**
+ *
+ */
+ password,
+ /**
+ *
+ */
+ checkbox,
+ /**
+ *
+ */
+ radio,
+ /**
+ *
+ */
+ submit,
+ /**
+ *
+ */
+ reset,
+ /**
+ *
+ */
+ file,
+ /**
+ *
+ */
+ hidden,
+ /**
+ *
+ */
+ image,
+ /**
+ *
+ */
+ button
+ };
+
+ /** Values for button types */
+ public enum ButtonType {
+ /**
+ *
+ */
+ button,
+ /**
+ *
+ */
+ submit,
+ /**
+ *
+ */
+ reset
+ };
+
+ /** %Scope (case-insensitive) */
+ public enum Scope {
+ /**
+ *
+ */
+ row,
+ /**
+ *
+ */
+ col,
+ /**
+ *
+ */
+ rowgroup,
+ /**
+ *
+ */
+ colgroup
+ };
+
+ /**
+ * The element annotation for specifying element options other than
+ * attributes and allowed child elements
+ */
+ @Target({ElementType.TYPE})
+ @Retention(RetentionPolicy.RUNTIME)
+ public @interface Element {
+ /**
+ * Whether the start tag is required for the element.
+ * @return true if start tag is required
+ */
+ boolean startTag() default true;
+
+ /**
+ * Whether the end tag is required.
+ * @return true if end tag is required
+ */
+ boolean endTag() default true;
+ }
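
Concretely, HamletGen.initLut() earlier in this patch records spec interfaces annotated with endTag=false, and the generated factories then pass opt(false, ...) so printEndTag() emits nothing. A hypothetical spec fragment:

    // Hypothetical void element; BR in this spec is declared the same way
    // (cf. the br_ factory above, which passes opt(false, inline, false)).
    @Element(endTag = false)
    public interface WIDGET extends CoreAttrs {
    }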
+
+ /**
+ *
+ */
+ public interface _ {}
+
+ /**
+ *
+ */
+ public interface _Child extends _ {
+ /**
+ * Finish the current element.
+ * @return the parent element
+ */
+ _ _();
+ }
+
+ /**
+ *
+ */
+ public interface _Script {
+ /**
+ * Add a script element.
+ * @return a script element builder
+ */
+ SCRIPT script();
+
+ /**
+ * Add a script element
+ * @param src uri of the script
+ * @return the current element builder
+ */
+ _Script script(String src);
+ }
+
+ /**
+ *
+ */
+ public interface _Object {
+ /**
+ * Add an object element.
+ * @return an object element builder
+ */
+ OBJECT object();
+
+ /**
+ * Add an object element.
+ * @param selector as #id.class etc.
+ * @return an object element builder
+ */
+ OBJECT object(String selector);
+ }
+
+ /** %head.misc */
+ public interface HeadMisc extends _Script, _Object {
+ /**
+ * Add a style element.
+ * @return a style element builder
+ */
+ STYLE style();
+
+ /**
+ * Add a CSS style element.
+ * @param lines content of the style sheet
+ * @return the current element builder
+ */
+ HeadMisc style(Object... lines);
+
+ /**
+ * Add a meta element.
+ * @return a meta element builder
+ */
+ META meta();
+
+ /**
+ * Add a meta element.
+ * Shortcut of <code>meta().$name(name).$content(content)._();</code>
+ * @param name of the meta element
+ * @param content of the meta element
+ * @return the current element builder
+ */
+ HeadMisc meta(String name, String content);
+
+ /**
+ * Add a meta element with http-equiv attribute.
+ * Shortcut of <br>
+ * <code>meta().$http_equiv(header).$content(content)._();</code>
+ * @param header for the http-equiv attribute
+ * @param content of the header
+ * @return the current element builder
+ */
+ HeadMisc meta_http(String header, String content);
+
+ /**
+ * Add a link element.
+ * @return a link element builder
+ */
+ LINK link();
+
+ /**
+ * Add a link element.
+ * Implementations should infer the link type from the suffix of href.
+ * So <code>link("style.css");</code> is a shortcut of
+ * <code>link().$rel("stylesheet").$type("text/css").$href("style.css")._();
+ * </code>
+ * @param href of the link
+ * @return the current element builder
+ */
+ HeadMisc link(String href);
+ }
+
+ /** %heading */
+ public interface Heading {
+ /**
+ * Add an H1 element.
+ * @return a new H1 element builder
+ */
+ H1 h1();
+
+ /**
+ * Add a complete H1 element.
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h1(String cdata);
+
+ /**
+ * Add a complete H1 element
+ * @param selector the css selector in the form of (#id)?(.class)*
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h1(String selector, String cdata);
+
+ /**
+ * Add an H2 element.
+ * @return a new H2 element builder
+ */
+ H2 h2();
+
+ /**
+ * Add a complete H2 element.
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h2(String cdata);
+
+ /**
+     * Add a complete H2 element.
+ * @param selector the css selector in the form of (#id)?(.class)*
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h2(String selector, String cdata);
+
+ /**
+ * Add an H3 element.
+ * @return a new H3 element builder
+ */
+ H3 h3();
+
+ /**
+ * Add a complete H3 element.
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h3(String cdata);
+
+ /**
+     * Add a complete H3 element.
+ * @param selector the css selector in the form of (#id)?(.class)*
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h3(String selector, String cdata);
+
+ /**
+ * Add an H4 element.
+ * @return a new H4 element builder
+ */
+ H4 h4();
+
+ /**
+ * Add a complete H4 element.
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h4(String cdata);
+
+ /**
+     * Add a complete H4 element.
+ * @param selector the css selector in the form of (#id)?(.class)*
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h4(String selector, String cdata);
+
+ /**
+ * Add an H5 element.
+ * @return a new H5 element builder
+ */
+ H5 h5();
+
+ /**
+ * Add a complete H5 element.
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h5(String cdata);
+
+ /**
+     * Add a complete H5 element.
+ * @param selector the css selector in the form of (#id)?(.class)*
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h5(String selector, String cdata);
+
+ /**
+ * Add an H6 element.
+ * @return a new H6 element builder
+ */
+ H6 h6();
+
+ /**
+ * Add a complete H6 element.
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h6(String cdata);
+
+ /**
+ * Add a complete H6 element.
+ * @param selector the css selector in the form of (#id)?(.class)*
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ Heading h6(String selector, String cdata);
+ }
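+
+  // Illustrative usage (not part of the original patch; "div" stands for
+  // any builder mixing in Heading):
+  //   div.h1("Cluster Metrics")        // <h1>Cluster Metrics</h1>
+  //      .h2("#apps", "Applications"); // <h2 id="apps">Applications</h2>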
+
+ /** %list */
+ public interface Listing {
+
+ /**
+ * Add a UL (unordered list) element.
+ * @return a new UL element builder
+ */
+ UL ul();
+
+ /**
+ * Add a UL (unordered list) element.
+ * @param selector the css selector in the form of (#id)?(.class)*
+ * @return a new UL element builder
+ */
+ UL ul(String selector);
+
+ /**
+     * Add an OL (ordered list) element.
+     * @return a new OL element builder
+ */
+ OL ol();
+
+ /**
+     * Add an OL (ordered list) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new OL element builder
+ */
+ OL ol(String selector);
+ }
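+
+  // Illustrative usage (not part of the original patch; "block" stands for
+  // any builder mixing in Listing): UL/OL mix in _Li (defined below), so
+  // items chain and _() closes the list:
+  //   block.ul("#nav").li("first item").li("second item")._();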
+
+  /** %preformatted */
+ public interface Preformatted {
+
+ /**
+ * Add a PRE (preformatted) element.
+ * @return a new PRE element builder
+ */
+ PRE pre();
+
+ /**
+ * Add a PRE (preformatted) element.
+ * @param selector the css selector in the form of (#id)?(.class)*
+ * @return a new PRE element builder
+ */
+ PRE pre(String selector);
+ }
+
+ /** %coreattrs */
+ public interface CoreAttrs {
+ /** document-wide unique id
+ * @param id the id
+ * @return the current element builder
+ */
+ CoreAttrs $id(String id);
+
+ /** space-separated list of classes
+ * @param cls the classes
+ * @return the current element builder
+ */
+ CoreAttrs $class(String cls);
+
+ /** associated style info
+ * @param style the style
+ * @return the current element builder
+ */
+ CoreAttrs $style(String style);
+
+ /** advisory title
+ * @param title the title
+ * @return the current element builder
+ */
+ CoreAttrs $title(String title);
+ }
+
+ /** %i18n */
+ public interface I18nAttrs {
+ /** language code
+ * @param lang the code
+ * @return the current element builder
+ */
+ I18nAttrs $lang(String lang);
+
+ /** direction for weak/neutral text
+ * @param dir the {@link Dir} value
+ * @return the current element builder
+ */
+ I18nAttrs $dir(Dir dir);
+ }
+
+ /** %events */
+ public interface EventsAttrs {
+
+ /** a pointer button was clicked
+ * @param onclick the script
+ * @return the current element builder
+ */
+ EventsAttrs $onclick(String onclick);
+
+ /** a pointer button was double clicked
+ * @param ondblclick the script
+ * @return the current element builder
+ */
+ EventsAttrs $ondblclick(String ondblclick);
+
+ /** a pointer button was pressed down
+ * @param onmousedown the script
+ * @return the current element builder
+ */
+ EventsAttrs $onmousedown(String onmousedown);
+
+ /** a pointer button was released
+ * @param onmouseup the script
+ * @return the current element builder
+ */
+ EventsAttrs $onmouseup(String onmouseup);
+
+ /** a pointer was moved onto
+ * @param onmouseover the script
+ * @return the current element builder
+ */
+ EventsAttrs $onmouseover(String onmouseover);
+
+ /** a pointer was moved within
+ * @param onmousemove the script
+ * @return the current element builder
+ */
+ EventsAttrs $onmousemove(String onmousemove);
+
+ /** a pointer was moved away
+ * @param onmouseout the script
+ * @return the current element builder
+ */
+ EventsAttrs $onmouseout(String onmouseout);
+
+ /** a key was pressed and released
+ * @param onkeypress the script
+ * @return the current element builder
+ */
+ EventsAttrs $onkeypress(String onkeypress);
+
+ /** a key was pressed down
+ * @param onkeydown the script
+ * @return the current element builder
+ */
+ EventsAttrs $onkeydown(String onkeydown);
+
+ /** a key was released
+ * @param onkeyup the script
+ * @return the current element builder
+ */
+ EventsAttrs $onkeyup(String onkeyup);
+ }
+
+ /** %attrs */
+ public interface Attrs extends CoreAttrs, I18nAttrs, EventsAttrs {
+ }
+
+ /** Part of %pre.exclusion */
+ public interface _FontSize extends _Child {
+ // BIG omitted cf. http://www.w3.org/TR/html5-diff/
+
+ /**
+ * Add a SMALL (small print) element
+ * @return a new SMALL element builder
+ */
+ SMALL small();
+
+ /**
+ * Add a complete small (small print) element.
+ * Shortcut of: small()._(cdata)._();
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ _FontSize small(String cdata);
+
+ /**
+ * Add a complete small (small print) element.
+ * Shortcut of: small().$id(id).$class(class)._(cdata)._();
+ * @param selector css selector in the form of (#id)?(.class)*
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ _FontSize small(String selector, String cdata);
+ }
+
+ /** %fontstyle -(%pre.exclusion) */
+ public interface _FontStyle extends _Child {
+ // TT omitted
+
+ /**
+ * Add an I (italic, alt voice/mood) element.
+ * @return the new I element builder
+ */
+ I i();
+
+ /**
+ * Add a complete I (italic, alt voice/mood) element.
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ _FontStyle i(String cdata);
+
+ /**
+ * Add a complete I (italic, alt voice/mood) element.
+ * @param selector the css selector in the form of (#id)?(.class)*
+ * @param cdata the content of the element
+ * @return the current element builder
+ */
+ _FontStyle i(String selector, String cdata);
+
+ /**
+ * Add a new B (bold/important) element.
+ * @return a new B element builder
+ */
+ B b();
+
+ /**
+ * Add a complete B (bold/important) element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _FontStyle b(String cdata);
+
+ /**
+ * Add a complete B (bold/important) element.
+ * @param selector the css select (#id)?(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _FontStyle b(String selector, String cdata);
+ }
+
+ /** %fontstyle */
+ public interface FontStyle extends _FontStyle, _FontSize {
+ }
+
+ /** %phrase */
+ public interface Phrase extends _Child {
+
+ /**
+ * Add an EM (emphasized) element.
+ * @return a new EM element builder
+ */
+ EM em();
+
+ /**
+ * Add an EM (emphasized) element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase em(String cdata);
+
+ /**
+ * Add an EM (emphasized) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase em(String selector, String cdata);
+
+ /**
+ * Add a STRONG (important) element.
+ * @return a new STRONG element builder
+ */
+ STRONG strong();
+
+ /**
+ * Add a complete STRONG (important) element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase strong(String cdata);
+
+ /**
+ * Add a complete STRONG (important) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase strong(String selector, String cdata);
+
+ /**
+ * Add a DFN element.
+ * @return a new DFN element builder
+ */
+ DFN dfn();
+
+ /**
+ * Add a complete DFN element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase dfn(String cdata);
+
+ /**
+ * Add a complete DFN element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase dfn(String selector, String cdata);
+
+ /**
+ * Add a CODE (code fragment) element.
+ * @return a new CODE element builder
+ */
+ CODE code();
+
+ /**
+ * Add a complete CODE element.
+ * @param cdata the code
+ * @return the current element builder
+ */
+ Phrase code(String cdata);
+
+ /**
+ * Add a complete CODE element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the code
+ * @return the current element builder
+ */
+ Phrase code(String selector, String cdata);
+
+ /**
+ * Add a SAMP (sample) element.
+ * @return a new SAMP element builder
+ */
+ SAMP samp();
+
+ /**
+ * Add a complete SAMP (sample) element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase samp(String cdata);
+
+ /**
+ * Add a complete SAMP (sample) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase samp(String selector, String cdata);
+
+ /**
+ * Add a KBD (keyboard) element.
+ * @return a new KBD element builder
+ */
+ KBD kbd();
+
+ /**
+ * Add a KBD (keyboard) element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase kbd(String cdata);
+
+ /**
+ * Add a KBD (keyboard) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase kbd(String selector, String cdata);
+
+ /**
+ * Add a VAR (variable) element.
+ * @return a new VAR element builder
+ */
+ VAR var();
+
+ /**
+ * Add a VAR (variable) element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase var(String cdata);
+
+ /**
+ * Add a VAR (variable) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase var(String selector, String cdata);
+
+ /**
+ * Add a CITE element.
+ * @return a new CITE element builder
+ */
+ CITE cite();
+
+ /**
+ * Add a CITE element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase cite(String cdata);
+
+ /**
+ * Add a CITE element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase cite(String selector, String cdata);
+
+ /**
+ * Add an ABBR (abbreviation) element.
+ * @return a new ABBR element builder
+ */
+ ABBR abbr();
+
+ /**
+     * Add an ABBR (abbreviation) element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase abbr(String cdata);
+
+ /**
+     * Add an ABBR (abbreviation) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ Phrase abbr(String selector, String cdata);
+
+ // ACRONYM omitted, use ABBR
+ }
+
+ /** Part of %pre.exclusion */
+ public interface _ImgObject extends _Object, _Child {
+
+ /**
+     * Add an IMG (image) element.
+ * @return a new IMG element builder
+ */
+ IMG img();
+
+ /**
+     * Add an IMG (image) element.
+ * @param src the source URL of the image
+ * @return the current element builder
+ */
+ _ImgObject img(String src);
+ }
+
+ /** Part of %pre.exclusion */
+ public interface _SubSup extends _Child {
+
+ /**
+ * Add a SUB (subscript) element.
+ * @return a new SUB element builder
+ */
+ SUB sub();
+
+ /**
+ * Add a complete SUB (subscript) element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _SubSup sub(String cdata);
+
+ /**
+ * Add a complete SUB (subscript) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _SubSup sub(String selector, String cdata);
+
+ /**
+ * Add a SUP (superscript) element.
+ * @return a new SUP element builder
+ */
+ SUP sup();
+
+ /**
+ * Add a SUP (superscript) element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _SubSup sup(String cdata);
+
+ /**
+ * Add a SUP (superscript) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _SubSup sup(String selector, String cdata);
+ }
+
+  /** Mixin for elements that can contain A (anchor) elements */
+ public interface _Anchor {
+
+ /**
+     * Add an A (anchor) element.
+ * @return a new A element builder
+ */
+ A a();
+
+ /**
+     * Add an A (anchor) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new A element builder
+ */
+ A a(String selector);
+
+ /** Shortcut for <code>a().$href(href)._(anchorText)._();</code>
+ * @param href the URI
+ * @param anchorText for the URI
+ * @return the current element builder
+ */
+ _Anchor a(String href, String anchorText);
+
+ /** Shortcut for <code>a(selector).$href(href)._(anchorText)._();</code>
+ * @param selector in the form of (#id)?(.class)*
+ * @param href the URI
+ * @param anchorText for the URI
+ * @return the current element builder
+ */
+ _Anchor a(String selector, String href, String anchorText);
+ }
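+
+  // Illustrative usage (not part of the original patch; "elem" stands for
+  // any builder mixing in _Anchor):
+  //   elem.a("/jobs", "all jobs")                                 // <a href="/jobs">all jobs</a>
+  //       .a(".external", "http://hadoop.apache.org/", "Hadoop"); // adds class="external"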
+
+ /**
+ * INS and DEL are unusual for HTML
+ * "in that they may serve as either block-level or inline elements
+ * (but not both)".
+ * <br>cf. http://www.w3.org/TR/html4/struct/text.html#h-9.4
+ * <br>cf. http://www.w3.org/TR/html5/edits.html#edits
+ */
+ public interface _InsDel {
+
+ /**
+ * Add an INS (insert) element.
+ * @return an INS element builder
+ */
+ INS ins();
+
+ /**
+ * Add a complete INS element.
+ * @param cdata inserted data
+ * @return the current element builder
+ */
+ _InsDel ins(String cdata);
+
+ /**
+ * Add a DEL (delete) element.
+ * @return a DEL element builder
+ */
+ DEL del();
+
+ /**
+ * Add a complete DEL element.
+ * @param cdata deleted data
+ * @return the current element builder
+ */
+ _InsDel del(String cdata);
+ }
+
+ /** %special -(A|%pre.exclusion) */
+ public interface _Special extends _Script, _InsDel {
+
+ /**
+ * Add a BR (line break) element.
+ * @return a new BR element builder
+ */
+ BR br();
+
+ /**
+ * Add a BR (line break) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return the current element builder
+ */
+ _Special br(String selector);
+
+ /**
+ * Add a MAP element.
+ * @return a new MAP element builder
+ */
+ MAP map();
+
+ /**
+ * Add a MAP element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new MAP element builder
+ */
+ MAP map(String selector);
+
+ /**
+ * Add a Q (inline quotation) element.
+ * @return a q (inline quotation) element builder
+ */
+ Q q();
+
+ /**
+ * Add a complete Q element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Special q(String cdata);
+
+ /**
+ * Add a Q element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Special q(String selector, String cdata);
+
+ /**
+ * Add a SPAN element.
+ * @return a new SPAN element builder
+ */
+ SPAN span();
+
+ /**
+ * Add a SPAN element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Special span(String cdata);
+
+ /**
+ * Add a SPAN element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Special span(String selector, String cdata);
+
+ /**
+ * Add a bdo (bidirectional override) element
+ * @return a bdo element builder
+ */
+ BDO bdo();
+
+ /**
+ * Add a bdo (bidirectional override) element
+ * @param dir the direction of the text
+ * @param cdata the text
+ * @return the current element builder
+ */
+ _Special bdo(Dir dir, String cdata);
+ }
+
+ /** %special */
+ public interface Special extends _Anchor, _ImgObject, _SubSup, _Special {
+ }
+
+  /** Mixin for elements that can contain LABEL elements */
+ public interface _Label extends _Child {
+
+ /**
+ * Add a LABEL element.
+ * @return a new LABEL element builder
+ */
+ LABEL label();
+
+ /**
+ * Add a LABEL element.
+ * Shortcut of <code>label().$for(forId)._(cdata)._();</code>
+ * @param forId the for attribute
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Label label(String forId, String cdata);
+ }
+
+  /** Mixin for elements that can contain form controls */
+ public interface _FormCtrl {
+
+ /**
+     * Add an INPUT element.
+ * @return a new INPUT element builder
+ */
+ INPUT input();
+
+ /**
+     * Add an INPUT element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new INPUT element builder
+ */
+ INPUT input(String selector);
+
+ /**
+ * Add a SELECT element.
+ * @return a new SELECT element builder
+ */
+ SELECT select();
+
+ /**
+ * Add a SELECT element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new SELECT element builder
+ */
+ SELECT select(String selector);
+
+ /**
+ * Add a TEXTAREA element.
+ * @return a new TEXTAREA element builder
+ */
+ TEXTAREA textarea();
+
+ /**
+ * Add a TEXTAREA element.
+ * @param selector
+ * @return a new TEXTAREA element builder
+ */
+ TEXTAREA textarea(String selector);
+
+ /**
+ * Add a complete TEXTAREA element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _FormCtrl textarea(String selector, String cdata);
+
+ /**
+ * Add a BUTTON element.
+ * @return a new BUTTON element builder
+ */
+ BUTTON button();
+
+ /**
+ * Add a BUTTON element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new BUTTON element builder
+ */
+ BUTTON button(String selector);
+
+ /**
+ * Add a complete BUTTON element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _FormCtrl button(String selector, String cdata);
+ }
+
+ /** %formctrl */
+ public interface FormCtrl extends _Label, _FormCtrl {
+ }
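+
+  // Illustrative usage (not part of the original patch; "form" stands for
+  // any builder mixing in FormCtrl, and InputType is assumed to define a
+  // text constant, per the enum above):
+  //   form.label("q", "Query:");                              // <label for="q">Query:</label>
+  //   form.input("#q").$type(InputType.text).$name("q")._();  // <input id="q" type="text" name="q">
+  //   form.button(".go", "Go");                               // <button class="go">Go</button>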
+
+  /** Mixin for elements that can contain (HTML-escaped) text content */
+ public interface _Content extends _Child {
+ /**
+ * Content of the element
+ * @param lines of content
+ * @return the current element builder
+ */
+ _Content _(Object... lines);
+ }
+
+  /** Mixin for elements that can contain raw (unescaped) content */
+ public interface _RawContent extends _Child {
+ /**
+     * Raw content, emitted without HTML escaping
+ * @param lines of content
+ * @return the current element builder
+ */
+ _RawContent _r(Object... lines);
+ }
+
+ /** #PCDATA */
+ public interface PCData extends _Content, _RawContent {
+ }
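+
+  // Illustrative contrast (not part of the original patch): _() escapes its
+  // arguments for HTML while _r() emits them verbatim:
+  //   cell._("a < b");          // renders as "a &lt; b"
+  //   cell._r("<em>raw</em>");  // renders the markup unescaped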
+
+ /** %inline */
+ public interface Inline extends PCData, FontStyle, Phrase, Special, FormCtrl {
+ }
+
+ /**
+ *
+ */
+ public interface I extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface B extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface SMALL extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface EM extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface STRONG extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface DFN extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface CODE extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface SAMP extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface KBD extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface VAR extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface CITE extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface ABBR extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface ACRONYM extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface SUB extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface SUP extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface SPAN extends Attrs, Inline, _Child {
+ }
+
+ /** The dir attribute is required for the BDO element */
+ public interface BDO extends CoreAttrs, I18nAttrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface BR extends CoreAttrs, _Child {
+ }
+
+  /** Mixin for elements that can contain FORM elements */
+ public interface _Form {
+
+ /**
+ * Add a FORM element.
+ * @return a new FORM element builder
+ */
+ FORM form();
+
+ /**
+ * Add a FORM element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new FORM element builder
+ */
+ FORM form(String selector);
+ }
+
+  /** Mixin for elements that can contain FIELDSET elements */
+ public interface _FieldSet {
+
+ /**
+ * Add a FIELDSET element.
+ * @return a new FIELDSET element builder
+ */
+ FIELDSET fieldset();
+
+ /**
+ * Add a FIELDSET element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new FIELDSET element builder
+ */
+ FIELDSET fieldset(String selector);
+ }
+
+ /** %block -(FORM|FIELDSET) */
+ public interface _Block extends Heading, Listing, Preformatted {
+
+ /**
+ * Add a P (paragraph) element.
+ * @return a new P element builder
+ */
+ P p();
+
+ /**
+ * Add a P (paragraph) element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new P element builder
+ */
+ P p(String selector);
+
+ /**
+ * Add a DL (description list) element.
+ * @return a new DL element builder
+ */
+ DL dl();
+
+ /**
+ * Add a DL element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new DL element builder
+ */
+ DL dl(String selector);
+
+ /**
+ * Add a DIV element.
+ * @return a new DIV element builder
+ */
+ DIV div();
+
+ /**
+ * Add a DIV element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new DIV element builder
+ */
+ DIV div(String selector);
+
+ // NOSCRIPT omitted
+ // cf. http://www.w3.org/html/wg/tracker/issues/117
+
+ /**
+ * Add a BLOCKQUOTE element.
+ * @return a new BLOCKQUOTE element builder
+ */
+ BLOCKQUOTE blockquote();
+
+ /**
+ * Alias of blockquote
+ * @return a new BLOCKQUOTE element builder
+ */
+ BLOCKQUOTE bq();
+
+ /**
+     * Add an HR (horizontal rule) element.
+ * @return a new HR element builder
+ */
+ HR hr();
+
+ /**
+     * Add an HR element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new HR element builder
+ */
+ _Block hr(String selector);
+
+ /**
+ * Add a TABLE element.
+ * @return a new TABLE element builder
+ */
+ TABLE table();
+
+ /**
+ * Add a TABLE element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new TABLE element builder
+ */
+ TABLE table(String selector);
+
+ /**
+     * Add an ADDRESS element.
+ * @return a new ADDRESS element builder
+ */
+ ADDRESS address();
+
+ /**
+ * Add a complete ADDRESS element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Block address(String cdata);
+
+ /**
+ * Embed a sub-view.
+ * @param cls the sub-view class
+ * @return the current element builder
+ */
+ _Block _(Class<? extends SubView> cls);
+ }
+
+ /** %block */
+ public interface Block extends _Block, _Form, _FieldSet {
+ }
+
+ /** %flow */
+ public interface Flow extends Block, Inline {
+ }
+
+  /** The content model of the BODY element */
+ public interface _Body extends Block, _Script, _InsDel {
+ }
+
+ /**
+ *
+ */
+ public interface BODY extends Attrs, _Body, _Child {
+
+ /**
+ * The document has been loaded.
+ * @param script to invoke
+ * @return the current element builder
+ */
+ BODY $onload(String script);
+
+ /**
+ * The document has been removed
+ * @param script to invoke
+ * @return the current element builder
+ */
+ BODY $onunload(String script);
+ }
+
+ /**
+ *
+ */
+ public interface ADDRESS extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface DIV extends Attrs, Flow, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface A extends Attrs, _Child, /* %inline -(A) */
+ PCData, FontStyle, Phrase, _ImgObject, _Special,
+ _SubSup, FormCtrl {
+ // $charset omitted.
+
+ /** advisory content type
+ * @param cdata the content-type
+ * @return the current element builder
+ */
+ A $type(String cdata);
+
+ // $name omitted. use id instead.
+ /** URI for linked resource
+ * @param uri the URI
+ * @return the current element builder
+ */
+ A $href(String uri);
+
+ /** language code
+ * @param cdata the code
+ * @return the current element builder
+ */
+ A $hreflang(String cdata);
+
+ /** forward link types
+ * @param linkTypes the types
+ * @return the current element builder
+ */
+ A $rel(EnumSet<LinkType> linkTypes);
+
+ /**
+ * forward link types
+ * @param linkTypes space-separated list of link types
+ * @return the current element builder.
+ */
+ A $rel(String linkTypes);
+
+ // $rev omitted. Instead of rev="made", use rel="author"
+
+ /** accessibility key character
+ * @param cdata the key
+ * @return the current element builder
+ */
+ A $accesskey(String cdata);
+
+ // $shape and coords omitted. use area instead of a for image maps.
+ /** position in tabbing order
+ * @param index the index
+ * @return the current element builder
+ */
+ A $tabindex(int index);
+
+ /** the element got the focus
+ * @param script to invoke
+ * @return the current element builder
+ */
+ A $onfocus(String script);
+
+ /** the element lost the focus
+ * @param script to invoke
+ * @return the current element builder
+ */
+ A $onblur(String script);
+ }
+
+ /**
+ *
+ */
+ public interface MAP extends Attrs, Block, _Child {
+
+ /**
+     * Add an AREA element.
+ * @return a new AREA element builder
+ */
+ AREA area();
+
+ /**
+     * Add an AREA element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new AREA element builder
+ */
+ AREA area(String selector);
+
+ /** for reference by usemap
+ * @param name of the map
+ * @return the current element builder
+ */
+ MAP $name(String name);
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface AREA extends Attrs, _Child {
+
+ /** controls interpretation of coords
+ * @param shape of the area
+ * @return the current element builder
+ */
+ AREA $shape(Shape shape);
+
+ /** comma-separated list of lengths
+ * @param cdata coords of the area
+ * @return the current element builder
+ */
+ AREA $coords(String cdata);
+
+ /** URI for linked resource
+ * @param uri the URI
+ * @return the current element builder
+ */
+ AREA $href(String uri);
+
+    // $nohref omitted.
+ /** short description
+ * @param desc the description
+ * @return the current element builder
+ */
+ AREA $alt(String desc);
+
+ /** position in tabbing order
+ * @param index of the order
+ * @return the current element builder
+ */
+ AREA $tabindex(int index);
+
+ /** accessibility key character
+ * @param cdata the key
+ * @return the current element builder
+ */
+ AREA $accesskey(String cdata);
+
+ /** the element got the focus
+ * @param script to invoke
+ * @return the current element builder
+ */
+ AREA $onfocus(String script);
+
+ /** the element lost the focus
+ * @param script to invoke
+ * @return the current element builder
+ */
+ AREA $onblur(String script);
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface LINK extends Attrs, _Child {
+ // $charset omitted
+ /** URI for linked resource
+ * @param uri the URI
+ * @return the current element builder
+ */
+ LINK $href(String uri);
+
+ /** language code
+ * @param cdata the code
+ * @return the current element builder
+ */
+ LINK $hreflang(String cdata);
+
+ /** advisory content type
+ * @param cdata the type
+ * @return the current element builder
+ */
+ LINK $type(String cdata);
+
+ /** forward link types
+ * @param linkTypes the types
+ * @return the current element builder
+ */
+ LINK $rel(EnumSet<LinkType> linkTypes);
+
+ /**
+ * forward link types.
+ * @param linkTypes space-separated link types
+ * @return the current element builder
+ */
+ LINK $rel(String linkTypes);
+
+ // $rev omitted. Instead of rev="made", use rel="author"
+
+ /** for rendering on these media
+ * @param mediaTypes the media types
+ * @return the current element builder
+ */
+ LINK $media(EnumSet<Media> mediaTypes);
+
+ /**
+ * for rendering on these media.
+ * @param mediaTypes comma-separated list of media
+ * @return the current element builder
+ */
+ LINK $media(String mediaTypes);
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface IMG extends Attrs, _Child {
+
+ /** URI of image to embed
+ * @param uri the URI
+ * @return the current element builder
+ */
+ IMG $src(String uri);
+
+ /** short description
+ * @param desc the description
+ * @return the current element builder
+ */
+ IMG $alt(String desc);
+
+ // $longdesc omitted. use <a...><img..></a> instead
+ // $name omitted. use id instead.
+
+ /** override height
+ * @param pixels the height
+ * @return the current element builder
+ */
+ IMG $height(int pixels);
+
+ /**
+ * override height
+ * @param cdata the height (can use %, * etc.)
+ * @return the current element builder
+ */
+ IMG $height(String cdata);
+
+ /** override width
+ * @param pixels the width
+ * @return the current element builder
+ */
+ IMG $width(int pixels);
+
+ /**
+ * override width
+ * @param cdata the width (can use %, * etc.)
+ * @return the current element builder
+ */
+ IMG $width(String cdata);
+
+ /** use client-side image map
+ * @param uri the URI
+ * @return the current element builder
+ */
+ IMG $usemap(String uri);
+
+ /** use server-side image map
+ * @return the current element builder
+ */
+ IMG $ismap();
+ }
+
+  /** Mixin for elements that can contain PARAM elements */
+ public interface _Param extends _Child {
+
+ /**
+ * Add a PARAM (parameter) element.
+ * @return a new PARAM element builder
+ */
+ PARAM param();
+
+ /**
+ * Add a PARAM element.
+ * Shortcut of <code>param().$name(name).$value(value)._();</code>
+ * @param name of the value
+ * @param value the value
+ * @return the current element builder
+ */
+ _Param param(String name, String value);
+ }
+
+ /**
+ *
+ */
+ public interface OBJECT extends Attrs, _Param, Flow, _Child {
+ // $declare omitted. repeat element completely
+
+    // $archive, classid, codebase, codetype omitted. use data and type
+
+ /** reference to object's data
+ * @param uri the URI
+ * @return the current element builder
+ */
+ OBJECT $data(String uri);
+
+ /** content type for data
+ * @param contentType the type
+ * @return the current element builder
+ */
+ OBJECT $type(String contentType);
+
+ // $standby omitted. fix the resource instead.
+
+ /** override height
+ * @param pixels the height
+ * @return the current element builder
+ */
+ OBJECT $height(int pixels);
+
+ /**
+ * override height
+ * @param length the height (can use %, *)
+ * @return the current element builder
+ */
+ OBJECT $height(String length);
+
+ /** override width
+ * @param pixels the width
+ * @return the current element builder
+ */
+ OBJECT $width(int pixels);
+
+ /**
+ * override width
+     * @param length the width (can use %, *)
+ * @return the current element builder
+ */
+ OBJECT $width(String length);
+
+ /** use client-side image map
+ * @param uri the URI/name of the map
+ * @return the current element builder
+ */
+ OBJECT $usemap(String uri);
+
+ /** submit as part of form
+ * @param cdata the name of the object
+ * @return the current element builder
+ */
+ OBJECT $name(String cdata);
+
+ /** position in tabbing order
+ * @param index of the order
+ * @return the current element builder
+ */
+ OBJECT $tabindex(int index);
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface PARAM {
+
+ /** document-wide unique id
+ * @param cdata the id
+ * @return the current element builder
+ */
+ PARAM $id(String cdata);
+
+ /** property name. Required.
+ * @param cdata the name
+ * @return the current element builder
+ */
+ PARAM $name(String cdata);
+
+ /** property value
+ * @param cdata the value
+ * @return the current element builder
+ */
+ PARAM $value(String cdata);
+
+ // $type and valuetype omitted
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface HR extends Attrs, _Child {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface P extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface H1 extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface H2 extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface H3 extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface H4 extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface H5 extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface H6 extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface PRE extends Attrs, _Child, /* (%inline;)* -(%pre.exclusion) */
+ PCData, _FontStyle, Phrase, _Anchor, _Special,
+ FormCtrl {
+ }
+
+ /**
+ *
+ */
+ public interface Q extends Attrs, Inline, _Child {
+
+ /** URI for source document or msg
+ * @param uri the URI
+ * @return the current element builder
+ */
+ Q $cite(String uri);
+ }
+
+ /**
+ *
+ */
+ public interface BLOCKQUOTE extends Attrs, Block, _Script, _Child {
+
+ /** URI for source document or msg
+ * @param uri the URI
+ * @return the current element builder
+ */
+ BLOCKQUOTE $cite(String uri);
+ }
+
+ /**
+ * @see _InsDel INS/DEL quirks.
+ */
+ public interface INS extends Attrs, Flow, _Child {
+ /** info on reason for change
+ * @param uri
+ * @return the current element builder
+ */
+ INS $cite(String uri);
+
+ /** date and time of change
+ * @param datetime
+ * @return the current element builder
+ */
+ INS $datetime(String datetime);
+ }
+
+ /**
+ * @see _InsDel INS/DEL quirks.
+ */
+ public interface DEL extends Attrs, Flow, _Child {
+ /** info on reason for change
+ * @param uri the info URI
+ * @return the current element builder
+ */
+ DEL $cite(String uri);
+
+ /** date and time of change
+ * @param datetime the time
+ * @return the current element builder
+ */
+ DEL $datetime(String datetime);
+ }
+
+  /** Mixin for the DT/DD children of a definition list */
+ public interface _Dl extends _Child {
+
+ /**
+ * Add a DT (term of the item) element.
+ * @return a new DT element builder
+ */
+ DT dt();
+
+ /**
+ * Add a complete DT element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Dl dt(String cdata);
+
+ /**
+ * Add a DD (definition/description) element.
+ * @return a new DD element builder
+ */
+ DD dd();
+
+ /**
+ * Add a complete DD element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Dl dd(String cdata);
+ }
+
+ /**
+ *
+ */
+ public interface DL extends Attrs, _Dl, _Child {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface DT extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface DD extends Attrs, Flow, _Child {
+ }
+
+  /** Mixin for the LI children of a list */
+ public interface _Li extends _Child {
+
+ /**
+     * Add an LI (list item) element.
+ * @return a new LI element builder
+ */
+ LI li();
+
+ /**
+     * Add an LI element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Li li(String cdata);
+ }
+
+ /**
+ *
+ */
+ public interface OL extends Attrs, _Li, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface UL extends Attrs, _Li, _Child {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface LI extends Attrs, Flow, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface FORM extends Attrs, _Child, /* (%block;|SCRIPT)+ -(FORM) */
+ _Script, _Block, _FieldSet {
+ /** server-side form handler
+ * @param uri
+ * @return the current element builder
+ */
+ FORM $action(String uri);
+
+ /** HTTP method used to submit the form
+ * @param method
+ * @return the current element builder
+ */
+ FORM $method(Method method);
+
+ /**
+     * content type for the "POST" method.
+ * The default is "application/x-www-form-urlencoded".
+ * Use "multipart/form-data" for input type=file
+ * @param enctype
+ * @return the current element builder
+ */
+ FORM $enctype(String enctype);
+
+ /** list of MIME types for file upload
+ * @param cdata
+ * @return the current element builder
+ */
+ FORM $accept(String cdata);
+
+ /** name of form for scripting
+ * @param cdata
+ * @return the current element builder
+ */
+ FORM $name(String cdata);
+
+ /** the form was submitted
+ * @param script
+ * @return the current element builder
+ */
+ FORM $onsubmit(String script);
+
+ /** the form was reset
+ * @param script
+ * @return the current element builder
+ */
+ FORM $onreset(String script);
+
+ /** (space and/or comma separated) list of supported charsets
+ * @param cdata
+ * @return the current element builder
+ */
+ FORM $accept_charset(String cdata);
+ }
+
+ /**
+ *
+ */
+ public interface LABEL extends Attrs, _Child, /* (%inline;)* -(LABEL) */
+ PCData, FontStyle, Phrase, Special, _FormCtrl {
+ /** matches field ID value
+ * @param cdata
+ * @return the current element builder
+ */
+ LABEL $for(String cdata);
+
+ /** accessibility key character
+ * @param cdata
+ * @return the current element builder
+ */
+ LABEL $accesskey(String cdata);
+
+ /** the element got the focus
+ * @param script
+ * @return the current element builder
+ */
+ LABEL $onfocus(String script);
+
+ /** the element lost the focus
+ * @param script
+ * @return the current element builder
+ */
+ LABEL $onblur(String script);
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface INPUT extends Attrs, _Child {
+ /** what kind of widget is needed. default is "text".
+ * @param inputType
+ * @return the current element builder
+ */
+ INPUT $type(InputType inputType);
+
+ /** submit as part of form
+ * @param cdata
+ * @return the current element builder
+ */
+ INPUT $name(String cdata);
+
+ /** Specify for radio buttons and checkboxes
+ * @param cdata
+ * @return the current element builder
+ */
+ INPUT $value(String cdata);
+
+ /** for radio buttons and check boxes
+ * @return the current element builder
+ */
+ INPUT $checked();
+
+ /** unavailable in this context
+ * @return the current element builder
+ */
+ INPUT $disabled();
+
+ /** for text and passwd
+ * @return the current element builder
+ */
+ INPUT $readonly();
+
+ /** specific to each type of field
+ * @param cdata
+ * @return the current element builder
+ */
+ INPUT $size(String cdata);
+
+ /** max chars for text fields
+ * @param length
+ * @return the current element builder
+ */
+ INPUT $maxlength(int length);
+
+ /** for fields with images
+ * @param uri
+ * @return the current element builder
+ */
+ INPUT $src(String uri);
+
+ /** short description
+ * @param cdata
+ * @return the current element builder
+ */
+ INPUT $alt(String cdata);
+
+ // $usemap omitted. use img instead of input for image maps.
+ /** use server-side image map
+ * @return the current element builder
+ */
+ INPUT $ismap();
+
+ /** position in tabbing order
+ * @param index
+ * @return the current element builder
+ */
+ INPUT $tabindex(int index);
+
+ /** accessibility key character
+ * @param cdata
+ * @return the current element builder
+ */
+ INPUT $accesskey(String cdata);
+
+ /** the element got the focus
+ * @param script
+ * @return the current element builder
+ */
+ INPUT $onfocus(String script);
+
+ /** the element lost the focus
+ * @param script
+ * @return the current element builder
+ */
+ INPUT $onblur(String script);
+
+ /** some text was selected
+ * @param script
+ * @return the current element builder
+ */
+ INPUT $onselect(String script);
+
+ /** the element value was changed
+ * @param script
+ * @return the current element builder
+ */
+ INPUT $onchange(String script);
+
+ /** list of MIME types for file upload (csv)
+ * @param contentTypes
+ * @return the current element builder
+ */
+ INPUT $accept(String contentTypes);
+ }
+
+  /** Mixin for the OPTION children of a select or optgroup */
+ public interface _Option extends _Child {
+ /**
+     * Add an OPTION element.
+ * @return a new OPTION element builder
+ */
+ OPTION option();
+
+ /**
+ * Add a complete OPTION element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Option option(String cdata);
+ }
+
+ /**
+ *
+ */
+ public interface SELECT extends Attrs, _Option, _Child {
+ /**
+     * Add an OPTGROUP element.
+ * @return a new OPTGROUP element builder
+ */
+ OPTGROUP optgroup();
+
+ /** field name
+ * @param cdata
+ * @return the current element builder
+ */
+ SELECT $name(String cdata);
+
+ /** rows visible
+ * @param rows
+ * @return the current element builder
+ */
+ SELECT $size(int rows);
+
+ /** default is single selection
+ * @return the current element builder
+ */
+ SELECT $multiple();
+
+ /** unavailable in this context
+ * @return the current element builder
+ */
+ SELECT $disabled();
+
+ /** position in tabbing order
+ * @param index
+ * @return the current element builder
+ */
+ SELECT $tabindex(int index);
+
+ /** the element got the focus
+ * @param script
+ * @return the current element builder
+ */
+ SELECT $onfocus(String script);
+
+ /** the element lost the focus
+ * @param script
+ * @return the current element builder
+ */
+ SELECT $onblur(String script);
+
+ /** the element value was changed
+ * @param script
+ * @return the current element builder
+ */
+ SELECT $onchange(String script);
+ }
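+
+  // Illustrative usage (not part of the original patch): SELECT mixes in
+  // _Option, so options chain and _() closes the element:
+  //   form.select("#queue").$name("queue")
+  //       .option("default").option("research")._();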
+
+ /**
+ *
+ */
+ public interface OPTGROUP extends Attrs, _Option, _Child {
+ /** unavailable in this context
+ * @return the current element builder
+ */
+ OPTGROUP $disabled();
+
+ /** for use in hierarchical menus
+ * @param cdata
+ * @return the current element builder
+ */
+ OPTGROUP $label(String cdata);
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface OPTION extends Attrs, PCData, _Child {
+ /** currently selected option
+ * @return the current element builder
+ */
+ OPTION $selected();
+
+ /** unavailable in this context
+ * @return the current element builder
+ */
+ OPTION $disabled();
+
+ /** for use in hierarchical menus
+ * @param cdata
+ * @return the current element builder
+ */
+ OPTION $label(String cdata);
+
+ /** defaults to element content
+ * @param cdata
+ * @return the current element builder
+ */
+ OPTION $value(String cdata);
+ }
+
+ /**
+ *
+ */
+ public interface TEXTAREA extends Attrs, PCData, _Child {
+ /** variable name for the text
+ * @param cdata
+ * @return the current element builder
+ */
+ TEXTAREA $name(String cdata);
+
+ /** visible rows
+ * @param rows
+ * @return the current element builder
+ */
+ TEXTAREA $rows(int rows);
+
+ /** visible columns
+ * @param cols
+ * @return the current element builder
+ */
+ TEXTAREA $cols(int cols);
+
+ /** unavailable in this context
+ * @return the current element builder
+ */
+ TEXTAREA $disabled();
+
+ /** text is readonly
+ * @return the current element builder
+ */
+ TEXTAREA $readonly();
+
+ /** position in tabbing order
+ * @param index
+ * @return the current element builder
+ */
+ TEXTAREA $tabindex(int index);
+
+ /** accessibility key character
+ * @param cdata
+ * @return the current element builder
+ */
+ TEXTAREA $accesskey(String cdata);
+
+ /** the element got the focus
+ * @param script
+ * @return the current element builder
+ */
+ TEXTAREA $onfocus(String script);
+
+ /** the element lost the focus
+ * @param script
+ * @return the current element builder
+ */
+ TEXTAREA $onblur(String script);
+
+ /** some text was selected
+ * @param script
+ * @return the current element builder
+ */
+ TEXTAREA $onselect(String script);
+
+ /** the element value was changed
+ * @param script
+ * @return the current element builder
+ */
+ TEXTAREA $onchange(String script);
+ }
+
+  /** Mixin for the LEGEND child of a fieldset */
+ public interface _Legend extends _Child {
+ /**
+ * Add a LEGEND element.
+ * @return a new LEGEND element builder
+ */
+ LEGEND legend();
+
+ /**
+ * Add a LEGEND element.
+ * @param cdata
+ * @return the current element builder
+ */
+ _Legend legend(String cdata);
+ }
+
+ /**
+ *
+ */
+ public interface FIELDSET extends Attrs, _Legend, PCData, Flow, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface LEGEND extends Attrs, Inline, _Child {
+ /** accessibility key character
+ * @param cdata
+ * @return the current element builder
+ */
+ LEGEND $accesskey(String cdata);
+ }
+
+ /**
+ *
+ */
+ public interface BUTTON extends /* (%flow;)* -(A|%formctrl|FORM|FIELDSET) */
+ _Block, PCData, FontStyle, Phrase, _Special, _ImgObject, _SubSup, Attrs {
+ /** name of the value
+ * @param cdata
+ * @return the current element builder
+ */
+ BUTTON $name(String cdata);
+
+ /** sent to server when submitted
+ * @param cdata
+ * @return the current element builder
+ */
+ BUTTON $value(String cdata);
+
+ /** for use as form button
+ * @param type
+ * @return the current element builder
+ */
+ BUTTON $type(ButtonType type);
+
+ /** unavailable in this context
+ * @return the current element builder
+ */
+ BUTTON $disabled();
+
+ /** position in tabbing order
+ * @param index
+ * @return the current element builder
+ */
+ BUTTON $tabindex(int index);
+
+ /** accessibility key character
+ * @param cdata
+ * @return the current element builder
+ */
+ BUTTON $accesskey(String cdata);
+
+ /** the element got the focus
+ * @param script
+ * @return the current element builder
+ */
+ BUTTON $onfocus(String script);
+
+ /** the element lost the focus
+ * @param script
+ * @return the current element builder
+ */
+ BUTTON $onblur(String script);
+ }
+
+  /** Mixin for the TR children of a table or table section */
+ public interface _TableRow {
+ /**
+ * Add a TR (table row) element.
+ * @return a new TR element builder
+ */
+ TR tr();
+
+ /**
+ * Add a TR element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new TR element builder
+ */
+ TR tr(String selector);
+ }
+
+  /** Mixin for the COL children of a table or column group */
+ public interface _TableCol extends _Child {
+ /**
+ * Add a COL element.
+ * @return a new COL element builder
+ */
+ COL col();
+
+ /**
+ * Add a COL element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return the current element builder
+ */
+ _TableCol col(String selector);
+ }
+
+  /** The content model of the TABLE element */
+ public interface _Table extends _TableRow, _TableCol {
+ /**
+ * Add a CAPTION element.
+ * @return a new CAPTION element builder
+ */
+ CAPTION caption();
+
+ /**
+ * Add a CAPTION element.
+ * @param cdata
+ * @return the current element builder
+ */
+ _Table caption(String cdata);
+
+ /**
+     * Add a COLGROUP element.
+ * @return a new COLGROUP element builder
+ */
+ COLGROUP colgroup();
+
+ /**
+ * Add a THEAD element.
+ * @return a new THEAD element builder
+ */
+ THEAD thead();
+
+ /**
+ * Add a THEAD element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new THEAD element builder
+ */
+ THEAD thead(String selector);
+
+ /**
+ * Add a TFOOT element.
+ * @return a new TFOOT element builder
+ */
+ TFOOT tfoot();
+
+ /**
+ * Add a TFOOT element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new TFOOT element builder
+ */
+ TFOOT tfoot(String selector);
+
+ /**
+ * Add a tbody (table body) element.
+     * Must come after thead/tfoot, and must not be mixed with tr elements
+     * at the same level.
+ * @return a new tbody element builder
+ */
+ TBODY tbody();
+
+ /**
+ * Add a TBODY element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new TBODY element builder
+ */
+ TBODY tbody(String selector);
+
+ // $summary, width, border, frame, rules, cellpadding, cellspacing omitted
+ // use css instead
+ }
+
+  /**
+   * TBODY should be used after THEAD/TFOOT, if and only if there are no
+   * TR elements placed directly under the TABLE.
+   */
+ public interface TABLE extends Attrs, _Table, _Child {
+ }
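+
+  // Illustrative usage (not part of the original patch; the concrete Hamlet
+  // implementation narrows the _() return type to the actual parent):
+  //   body.table("#jobs").
+  //     thead().tr().th("Id").th("State")._()._().
+  //     tbody().
+  //       tr().td("job_1").td("RUNNING")._().
+  //       tr().td("job_2").td("SUCCEEDED")._()._()._();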
+
+ /**
+ *
+ */
+ public interface CAPTION extends Attrs, Inline, _Child {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface THEAD extends Attrs, _TableRow, _Child {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface TFOOT extends Attrs, _TableRow, _Child {
+ }
+
+ /**
+ *
+ */
+ public interface TBODY extends Attrs, _TableRow, _Child {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface COLGROUP extends Attrs, _TableCol, _Child {
+ /** default number of columns in group. default: 1
+ * @param cols
+ * @return the current element builder
+ */
+ COLGROUP $span(int cols);
+
+ // $width omitted. use css instead.
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface COL extends Attrs, _Child {
+ /** COL attributes affect N columns. default: 1
+ * @param cols
+ * @return the current element builder
+ */
+ COL $span(int cols);
+ // $width omitted. use css instead.
+ }
+
+  /** Mixin for the TH/TD cells of a table row */
+ public interface _Tr extends _Child {
+ /**
+ * Add a TH element.
+ * @return a new TH element builder
+ */
+ TH th();
+
+ /**
+ * Add a complete TH element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Tr th(String cdata);
+
+ /**
+ * Add a TH element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Tr th(String selector, String cdata);
+
+ /**
+ * Add a TD element.
+ * @return a new TD element builder
+ */
+ TD td();
+
+ /**
+ * Add a TD element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Tr td(String cdata);
+
+ /**
+ * Add a TD element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Tr td(String selector, String cdata);
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface TR extends Attrs, _Tr, _Child {
+ }
+
+  /** Common attributes and content model of TH and TD cells */
+ public interface _Cell extends Attrs, Flow, _Child {
+    // $abbr omitted. begin cell text with terse text instead.
+    // use $title for elaboration, when appropriate.
+ // $axis omitted. use scope.
+ /** space-separated list of id's for header cells
+ * @param cdata
+ * @return the current element builder
+ */
+ _Cell $headers(String cdata);
+
+ /** scope covered by header cells
+ * @param scope
+ * @return the current element builder
+ */
+ _Cell $scope(Scope scope);
+
+ /** number of rows spanned by cell. default: 1
+ * @param rows
+ * @return the current element builder
+ */
+ _Cell $rowspan(int rows);
+
+ /** number of cols spanned by cell. default: 1
+ * @param cols
+ * @return the current element builder
+ */
+ _Cell $colspan(int cols);
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface TH extends _Cell {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface TD extends _Cell {
+ }
+
+  /** The content model of the HEAD element */
+ public interface _Head extends HeadMisc {
+ /**
+ * Add a TITLE element.
+ * @return a new TITLE element builder
+ */
+ TITLE title();
+
+ /**
+ * Add a TITLE element.
+ * @param cdata the content
+ * @return the current element builder
+ */
+ _Head title(String cdata);
+
+ /**
+ * Add a BASE element.
+ * @return a new BASE element builder
+ */
+ BASE base();
+
+ /**
+ * Add a complete BASE element.
+ * @param uri
+ * @return the current element builder
+ */
+ _Head base(String uri);
+ }
+
+ /**
+ *
+ */
+ public interface HEAD extends I18nAttrs, _Head, _Child {
+ // $profile omitted
+ }
+
+ /**
+ *
+ */
+ public interface TITLE extends I18nAttrs, PCData, _Child {
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface BASE extends _Child {
+ /** URI that acts as base URI
+ * @param uri
+ * @return the current element builder
+ */
+ BASE $href(String uri);
+ }
+
+ /**
+ *
+ */
+ @Element(endTag=false)
+ public interface META extends I18nAttrs, _Child {
+ /** HTTP response header name
+ * @param header
+ * @return the current element builder
+ */
+ META $http_equiv(String header);
+
+ /** metainformation name
+ * @param name
+ * @return the current element builder
+ */
+ META $name(String name);
+
+ /** associated information
+ * @param cdata
+ * @return the current element builder
+ */
+ META $content(String cdata);
+
+ // $scheme omitted
+ }
+
+ /**
+ *
+ */
+ public interface STYLE extends I18nAttrs, _Content, _Child {
+ /** content type of style language
+ * @param cdata
+ * @return the current element builder
+ */
+ STYLE $type(String cdata);
+
+ /** designed for use with these media
+ * @param media
+ * @return the current element builder
+ */
+ STYLE $media(EnumSet<Media> media);
+
+ /** advisory title
+ * @param cdata
+ * @return the current element builder
+ */
+ STYLE $title(String cdata);
+ }
+
+ /**
+ *
+ */
+ public interface SCRIPT extends _Content, _Child {
+ /** char encoding of linked resource
+ * @param cdata
+ * @return the current element builder
+ */
+ SCRIPT $charset(String cdata);
+
+ /** content type of script language
+ * @param cdata
+ * @return the current element builder
+ */
+ SCRIPT $type(String cdata);
+
+ /** URI for an external script
+ * @param cdata
+ * @return the current element builder
+ */
+ SCRIPT $src(String cdata);
+
+ /** UA may defer execution of script
+ * @param cdata
+ * @return the current element builder
+ */
+ SCRIPT $defer(String cdata);
+ }
+
+  /** The content model of the HTML (root) element */
+ public interface _Html extends _Head, _Body, _ {
+ /**
+ * Add a HEAD element.
+ * @return a new HEAD element builder
+ */
+ HEAD head();
+
+ /**
+ * Add a BODY element.
+ * @return a new BODY element builder
+ */
+ BODY body();
+
+ /**
+ * Add a BODY element.
+ * @param selector the css selector in the form of (#id)*(.class)*
+ * @return a new BODY element builder
+ */
+ BODY body(String selector);
+ }
+
+ // There is only one HEAD and BODY, in that order.
+ /**
+ * The root element
+ */
+ public interface HTML extends I18nAttrs, _Html {
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/DefaultPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/DefaultPage.java
new file mode 100644
index 0000000..40b138b1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/DefaultPage.java
@@ -0,0 +1,62 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import java.util.Enumeration;
+import java.util.Map;
+
+import com.google.common.base.Joiner;
+
+public class DefaultPage extends TextPage {
+ static final Joiner valJoiner = Joiner.on(", ");
+
+ @Override
+ public void render() {
+ puts("Request URI: ", request().getRequestURI());
+ puts("Query parameters:");
+ @SuppressWarnings("unchecked")
+ Map<String, String[]> params = request().getParameterMap();
+ for (Map.Entry<String, String[]> e : params.entrySet()) {
+ puts(" ", e.getKey(), "=", valJoiner.join(e.getValue()));
+ }
+ puts("More parameters:");
+ for (Map.Entry<String, String> e : moreParams().entrySet()) {
+ puts(" ", e.getKey(), "=", e.getValue());
+ }
+ puts("Path info: ", request().getPathInfo());
+ puts("Path translated: ", request().getPathTranslated());
+ puts("Auth type: ", request().getAuthType());
+ puts("Remote address: "+ request().getRemoteAddr());
+ puts("Remote user: ", request().getRemoteUser());
+ puts("Servlet attributes:");
+ @SuppressWarnings("unchecked")
+ Enumeration<String> attrNames = request().getAttributeNames();
+ while (attrNames.hasMoreElements()) {
+ String key = attrNames.nextElement();
+ puts(" ", key, "=", request().getAttribute(key));
+ }
+ puts("Headers:");
+ @SuppressWarnings("unchecked")
+ Enumeration<String> headerNames = request().getHeaderNames();
+ while (headerNames.hasMoreElements()) {
+ String key = headerNames.nextElement();
+ puts(" ", key, "=", request().getHeader(key));
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java
new file mode 100644
index 0000000..58c5ddd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java
@@ -0,0 +1,69 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import java.io.CharArrayWriter;
+import java.io.PrintWriter;
+
+import static org.apache.hadoop.yarn.webapp.Params.*;
+
+/**
+ * A jquery-ui themeable error page
+ */
+public class ErrorPage extends HtmlPage {
+
+ @Override
+ protected void render(Page.HTML<_> html) {
+ set(JQueryUI.ACCORDION_ID, "msg");
+ String title = "Sorry, got error "+ status();
+ html.
+ title(title).
+ link("/static/yarn.css").
+ _(JQueryUI.class). // an embedded sub-view
+ style("#msg { margin: 1em auto; width: 88%; }",
+ "#msg h1 { padding: 0.2em 1.5em; font: bold 1.3em serif; }").
+ div("#msg").
+ h1(title).
+ div().
+ _("Please consult").
+ a("http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html",
+ "RFC 2616")._(" for meanings of the error code.")._().
+ h1("Error Details").
+ pre().
+ _(errorDetails())._()._()._();
+ }
+
+ protected String errorDetails() {
+ if (!$(ERROR_DETAILS).isEmpty()) {
+ return $(ERROR_DETAILS);
+ }
+ if (error() != null) {
+ return toStackTrace(error(), 1024 * 64);
+ }
+ return "No exception was thrown.";
+ }
+
+ public static String toStackTrace(Throwable error, int cutoff) {
+ // default initial size is 32 chars
+ CharArrayWriter buffer = new CharArrayWriter(8 * 1024);
+ error.printStackTrace(new PrintWriter(buffer));
+ return buffer.size() < cutoff ? buffer.toString()
+ : buffer.toString().substring(0, cutoff);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java
new file mode 100644
index 0000000..68284d8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+public class FooterBlock extends HtmlBlock {
+
+ @Override protected void render(Block html) {
+ html.
+ div("#footer.ui-widget").
+ a("http://hadoop.apache.org/", "About Apache Hadoop")._();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java
new file mode 100644
index 0000000..d03d2d2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java
@@ -0,0 +1,34 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import static org.apache.hadoop.yarn.webapp.Params.*;
+
+public class HeaderBlock extends HtmlBlock {
+
+ @Override protected void render(Block html) {
+ html.
+ div("#header.ui-widget").
+ div("#user").
+ _("Logged in as: "+ request().getRemoteUser())._().
+ div("#logo").
+ img("/static/hadoop-st.png")._().
+ h1($(TITLE))._();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/Html.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/Html.java
new file mode 100644
index 0000000..5effa8b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/Html.java
@@ -0,0 +1,32 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import java.util.regex.Pattern;
+
+/**
+ * This class holds utility functions for HTML
+ */
+public class Html {
+ static final Pattern validIdRe = Pattern.compile("^[a-zA-Z_.0-9]+$");
+
+ public static boolean isValidId(String id) {
+ return validIdRe.matcher(id).matches();
+ }
+}
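
For illustration, isValidId accepts exactly the ids that are safe to splice
into the generated jQuery selectors in JQueryUI below:

    Html.isValidId("jobs_table");    // true: letters, digits, '_' and '.'
    Html.isValidId("x'); attack");   // false: quotes and spaces are rejected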
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
new file mode 100644
index 0000000..84179f7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
@@ -0,0 +1,82 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import java.io.PrintWriter;
+
+import org.apache.hadoop.yarn.webapp.MimeType;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.WebAppException;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+
+public abstract class HtmlBlock extends TextView implements SubView {
+
+ public class Block extends Hamlet {
+ Block(PrintWriter out, int level, boolean wasInline) {
+ super(out, level, wasInline);
+ }
+
+ @Override
+ protected void subView(Class<? extends SubView> cls) {
+ context().set(nestLevel(), wasInline());
+ render(cls);
+ setWasInline(context().wasInline());
+ }
+ }
+
+ private Block block;
+
+ private Block block() {
+ if (block == null) {
+ block = new Block(writer(), context().nestLevel(), context().wasInline());
+ }
+ return block;
+ }
+
+ protected HtmlBlock() {
+ this(null);
+ }
+
+ protected HtmlBlock(ViewContext ctx) {
+ super(ctx, MimeType.HTML);
+ }
+
+ @Override
+ public void render() {
+ int nestLevel = context().nestLevel();
+ LOG.debug("Rendering {} @{}", getClass(), nestLevel);
+ render(block());
+ if (block.nestLevel() != nestLevel) {
+ throw new WebAppException("Error rendering block: nestLevel="+
+ block.nestLevel() +" expected "+ nestLevel);
+ }
+ context().set(nestLevel, block.wasInline());
+ }
+
+ @Override
+ public void renderPartial() {
+ render();
+ }
+
+ /**
+ * Render a block of html. To be overridden by implementation.
+ * @param html the block to render
+ */
+ protected abstract void render(Block html);
+}
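
A minimal subclass sketch (hypothetical; FooterBlock and HeaderBlock above
are the real in-tree examples):

    public class HelloBlock extends HtmlBlock {   // hypothetical name
      @Override protected void render(Block html) {
        html.
          div("#hello.ui-widget").
            h2("Hello, "+ $("user"))._();   // $() reads a view parameter
      }
    }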
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java
new file mode 100644
index 0000000..c33fa97
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java
@@ -0,0 +1,84 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import java.io.PrintWriter;
+import java.util.EnumSet;
+
+import org.apache.hadoop.yarn.webapp.MimeType;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.WebAppException;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+
+public abstract class HtmlPage extends TextView {
+
+ public static class _ implements Hamlet._ {
+ }
+
+ public class Page extends Hamlet {
+ Page(PrintWriter out) {
+ super(out, 0, false);
+ }
+
+ @Override
+ protected void subView(Class<? extends SubView> cls) {
+ context().set(nestLevel(), wasInline());
+ render(cls);
+ setWasInline(context().wasInline());
+ }
+
+ public HTML<HtmlPage._> html() {
+ return new HTML<HtmlPage._>("html", null, EnumSet.of(EOpt.ENDTAG));
+ }
+ }
+
+ public static final String DOCTYPE =
+ "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01//EN\""+
+ " \"http://www.w3.org/TR/html4/strict.dtd\">";
+
+ private Page page;
+
+ private Page page() {
+ if (page == null) {
+ page = new Page(writer());
+ }
+ return page;
+ }
+
+ protected HtmlPage() {
+ this(null);
+ }
+
+ protected HtmlPage(ViewContext ctx) {
+ super(ctx, MimeType.HTML);
+ }
+
+ @Override
+ public void render() {
+ puts(DOCTYPE);
+ render(page().html().meta_http("Content-type", MimeType.HTML));
+ if (page().nestLevel() != 0) {
+ throw new WebAppException("Error rendering page: nestLevel="+
+ page().nestLevel());
+ }
+ }
+
+ protected abstract void render(Page.HTML<_> html);
+}
+
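
A minimal page sketch (hypothetical; TwoColumnLayout below is a real one):

    public class HelloPage extends HtmlPage {   // hypothetical name
      @Override protected void render(Page.HTML<_> html) {
        html.
          title("hello").
          body().
            _(HelloBlock.class)._()._();   // embed a SubView, close BODY/HTML
      }
    }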
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
new file mode 100644
index 0000000..7cefe1d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
@@ -0,0 +1,59 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import org.apache.hadoop.yarn.webapp.ResponseInfo;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+
+import com.google.inject.Inject;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+public class InfoBlock extends HtmlBlock {
+ final ResponseInfo info;
+
+ @Inject InfoBlock(ResponseInfo info) {
+ this.info = info;
+ }
+
+ @Override protected void render(Block html) {
+ TABLE<DIV<Hamlet>> table = html.
+ div(_INFO_WRAP).
+ table(_INFO).
+ tr().
+ th().$class(C_TH).$colspan(2)._(info.about())._()._();
+ int i = 0;
+ for (ResponseInfo.Item item : info) {
+ TR<TABLE<DIV<Hamlet>>> tr = table.
+ tr((++i % 2 != 0) ? _ODD : _EVEN).
+ th(item.key);
+ String value = String.valueOf(item.value);
+ if (item.url == null) {
+ tr.td(value);
+ } else {
+ tr.
+ td().
+ a(url(item.url), value)._();
+ }
+ tr._();
+ }
+ table._()._();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
new file mode 100644
index 0000000..7069ef3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -0,0 +1,231 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import com.google.common.collect.Lists;
+import com.google.inject.Inject;
+
+import java.util.List;
+import java.util.Locale;
+import javax.servlet.http.Cookie;
+
+import static org.apache.commons.lang.StringEscapeUtils.*;
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+
+import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.HTML;
+
+public class JQueryUI extends HtmlBlock {
+ // Render choices (mostly for dataTables)
+ public enum Render {
+ /** small (<~100 rows) table as html, degrades most gracefully */
+ HTML,
+ /** medium (<~2000 rows) table as js array */
+ JS_ARRAY,
+ /** large (<~10000 rows) table loading from server */
+ JS_LOAD,
+ /** huge (>~10000 rows) table processing from server */
+ JS_SERVER
+ };
+
+ // UI params
+ public static final String ACCORDION = "ui.accordion";
+ public static final String ACCORDION_ID = ACCORDION +".id";
+ public static final String DATATABLES = "ui.dataTables";
+ public static final String DATATABLES_ID = DATATABLES +".id";
+ public static final String DATATABLES_SELECTOR = DATATABLES +".selector";
+ public static final String DIALOG = "ui.dialog";
+ public static final String DIALOG_ID = DIALOG +".id";
+ public static final String DIALOG_SELECTOR = DIALOG +".selector";
+ public static final String PROGRESSBAR = "ui.progressbar";
+ public static final String PROGRESSBAR_ID = PROGRESSBAR +".id";
+ public static final String THEMESWITCHER = "ui.themeswitcher";
+ public static final String THEMESWITCHER_ID = THEMESWITCHER +".id";
+ public static final String THEME_KEY = "theme";
+ public static final String COOKIE_THEME = "jquery-ui-theme";
+ // common CSS classes
+ public static final String _PROGRESSBAR =
+ ".ui-progressbar.ui-widget.ui-widget-content.ui-corner-all";
+ public static final String C_PROGRESSBAR =
+ _PROGRESSBAR.replace('.', ' ').trim();
+ public static final String _PROGRESSBAR_VALUE =
+ ".ui-progressbar-value.ui-widget-header.ui-corner-left";
+ public static final String C_PROGRESSBAR_VALUE =
+ _PROGRESSBAR_VALUE.replace('.', ' ').trim();
+ public static final String _INFO_WRAP =
+ ".info-wrap.ui-widget-content.ui-corner-bottom";
+ public static final String _TH = ".ui-state-default";
+ public static final String C_TH = _TH.replace('.', ' ').trim();
+ public static final String C_TABLE = "table";
+ public static final String _INFO = ".info";
+ public static final String _ODD = ".odd";
+ public static final String _EVEN = ".even";
+
+ @Override
+ protected void render(Block html) {
+ html.
+ link(join("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.9/themes/",
+ getTheme(), "/jquery-ui.css")).
+ link("/static/dt-1.7.5/css/jui-dt.css").
+ script("https://ajax.googleapis.com/ajax/libs/jquery/1.4.4/jquery.min.js").
+ script("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.9/jquery-ui.min.js").
+ script("/static/dt-1.7.5/js/jquery.dataTables.min.js").
+ script("/static/yarn.dt.plugins.js").
+ script("/static/themeswitcher.js").
+ style("#jsnotice { padding: 0.2em; text-align: center; }",
+ ".ui-progressbar { height: 1em; min-width: 5em }"); // required
+
+ List<String> list = Lists.newArrayList();
+ initAccordions(list);
+ initDataTables(list);
+ initDialogs(list);
+ initProgressBars(list);
+ initThemeSwitcher(list);
+
+ if (!list.isEmpty()) {
+ html.
+ script().$type("text/javascript").
+ _("$(function() {")._(list.toArray())._("});")._();
+ }
+ }
+
+ public static void jsnotice(HTML html) {
+ html.
+ div("#jsnotice.ui-state-error").
+ _("This page works best with javascript enabled.")._();
+ html.
+ script().$type("text/javascript").
+ _("$('#jsnotice').hide();")._();
+ }
+
+ protected void initAccordions(List<String> list) {
+ for (String id : split($(ACCORDION_ID))) {
+ if (Html.isValidId(id)) {
+ String init = $(initID(ACCORDION, id));
+ if (init.isEmpty()) {
+ init = "{autoHeight: false}";
+ }
+ list.add(join(" $('#", id, "').accordion(", init, ");"));
+ }
+ }
+ }
+
+ protected void initDataTables(List<String> list) {
+ String defaultInit = "{bJQueryUI: true, sPaginationType: 'full_numbers'}";
+ for (String id : split($(DATATABLES_ID))) {
+ if (Html.isValidId(id)) {
+ String init = $(initID(DATATABLES, id));
+ if (init.isEmpty()) {
+ init = defaultInit;
+ }
+ list.add(join(" $('#", id, "').dataTable(", init,
+ ").fnSetFilteringDelay(188);"));
+ }
+ }
+ String selector = $(DATATABLES_SELECTOR);
+ if (!selector.isEmpty()) {
+ String init = $(initSelector(DATATABLES));
+ if (init.isEmpty()) {
+ init = defaultInit;
+ }
+ list.add(join(" $('", escapeJavaScript(selector), "').dataTable(", init,
+ ").fnSetFilteringDelay(288);"));
+ }
+ }
+
+ protected void initDialogs(List<String> list) {
+ String defaultInit = "{autoOpen: false, show: transfer, hide: explode}";
+ for (String id : split($(DIALOG_ID))) {
+ if (Html.isValidId(id)) {
+ String init = $(initID(DIALOG, id));
+ if (init.isEmpty()) {
+ init = defaultInit;
+ }
+ String opener = $(djoin(DIALOG, id, "opener"));
+ list.add(join(" $('#", id, "').dialog(", init, ");"));
+ if (!opener.isEmpty() && Html.isValidId(opener)) {
+ list.add(join(" $('#", opener, "').click(function() { ",
+ "$('#", id, "').dialog('open'); return false; });"));
+ }
+ }
+ }
+ String selector = $(DIALOG_SELECTOR);
+ if (!selector.isEmpty()) {
+ String init = $(initSelector(DIALOG));
+ if (init.isEmpty()) {
+ init = defaultInit;
+ }
+ list.add(join(" $('", escapeJavaScript(selector),
+ "').click(function() { $(this).children('.dialog').dialog(",
+ init, "); return false; });"));
+ }
+ }
+
+ protected void initProgressBars(List<String> list) {
+ for (String id : split($(PROGRESSBAR_ID))) {
+ if (Html.isValidId(id)) {
+ String init = $(initID(PROGRESSBAR, id));
+ list.add(join(" $('#", id, "').progressbar(", init, ");"));
+ }
+ }
+ }
+
+ protected void initThemeSwitcher(List<String> list) {
+ for (String id : split($(THEMESWITCHER_ID))) {
+ if (Html.isValidId(id)) {
+ list.add(join(" $('#", id,
+ "').themeswitcher({expires:888, path:'/'});"));
+ break; // one is enough
+ }
+ }
+ }
+
+ protected String getTheme() {
+ String theme = $(THEME_KEY);
+ if (!theme.isEmpty()) {
+ return theme;
+ }
+ Cookie c = cookies().get(COOKIE_THEME);
+ if (c != null) {
+ return c.getValue().toLowerCase(Locale.US).replace("%20", "-");
+ }
+ return "base";
+ }
+
+ public static String initID(String name, String id) {
+ return djoin(name, id, "init");
+ }
+
+ public static String initSelector(String name) {
+ return djoin(name, "selector.init");
+ }
+
+ public static StringBuilder tableInit() {
+ return new StringBuilder("{bJQueryUI:true, aaSorting:[], ").
+ append("sPaginationType: 'full_numbers', iDisplayLength:20, ").
+ append("aLengthMenu:[20, 40, 60, 80, 100]");
+ }
+
+ public static StringBuilder tableInitProgress(StringBuilder init,
+ long numCells) {
+ return init.append(", bProcessing:true, ").
+ append("oLanguage:{sProcessing:'Processing ").
+ append(numCells).append(" cells...").
+ append("<p><img src=\"/static/busy.gif\">'}");
+ }
+}
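
Note that tableInit() returns the option object still open; callers append
their own options and close the brace themselves, roughly (illustrative):

    StringBuilder init = tableInit().
        append(", aoColumns:[null, null]").   // caller-specific options
        append("}");                          // close the option object
    set(initID(DATATABLES, "jobs"), init.toString());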
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/Jsons.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/Jsons.java
new file mode 100644
index 0000000..06e5d06
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/Jsons.java
@@ -0,0 +1,56 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import java.io.PrintWriter;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+/**
+ * JSON helpers
+ */
+public class Jsons {
+ public static final String _SEP = "\",\"";
+
+ public static PrintWriter appendProgressBar(PrintWriter out, String pct) {
+ return out.append("<br title='").append(pct).append("'>").
+ append("<div class='").append(C_PROGRESSBAR).
+ append("' title='").append(pct).append('%').
+ append("'><div class='").append(C_PROGRESSBAR_VALUE).
+ append("' style='width: ").append(pct).
+ append("%'>").append("<\\/div><\\/div>");
+ }
+
+ public static PrintWriter appendProgressBar(PrintWriter out,
+ float progress) {
+ return appendProgressBar(out, String.format("%.1f", progress * 100));
+ }
+
+ public static PrintWriter appendSortable(PrintWriter out, Object value) {
+ return out.append("<br title='").append(String.valueOf(value)).append("'>");
+ }
+
+ public static PrintWriter appendLink(PrintWriter out, Object anchor,
+ String prefix, String... parts) {
+ String anchorText = String.valueOf(anchor);
+ return out.append("<a href='").append(anchor == null ? "#" :
+ ujoin(prefix, parts)).append("'>").append(anchorText).append("<\\/a>");
+ }
+}
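
The "<\/" escaping keeps the emitted markup legal inside a script tag. For
example (illustrative; progress is a 0..1 fraction):

    appendProgressBar(writer(), 0.425f);   // renders a themed 42.5% bar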
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java
new file mode 100644
index 0000000..e7699b7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java
@@ -0,0 +1,47 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+public class LipsumBlock extends HtmlBlock {
+
+ @Override
+ public void render(Block html) {
+ html.
+ p().
+ _("Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
+ "Vivamus eu dui in ipsum tincidunt egestas ac sed nibh.",
+ "Praesent quis nisl lorem, nec interdum urna.",
+ "Duis sagittis dignissim purus sed sollicitudin.",
+ "Morbi quis diam eu enim semper suscipit.",
+ "Nullam pretium faucibus sapien placerat tincidunt.",
+ "Donec eget lorem at quam fermentum vulputate a ac purus.",
+ "Cras ac dui felis, in pulvinar est.",
+ "Praesent tempor est sed neque pulvinar dictum.",
+ "Nullam magna augue, egestas luctus sollicitudin sed,",
+ "venenatis nec turpis.",
+ "Ut ante enim, congue sed laoreet et, accumsan id metus.",
+ "Mauris tincidunt imperdiet est, sed porta arcu vehicula et.",
+ "Etiam in nisi nunc.",
+ "Phasellus vehicula scelerisque quam, ac dignissim felis euismod a.",
+ "Proin eu ante nisl, vel porttitor eros.",
+ "Aliquam gravida luctus augue, at scelerisque enim consectetur vel.",
+ "Donec interdum tempor nisl, quis laoreet enim venenatis eu.",
+ "Quisque elit elit, vulputate eget porta vel, laoreet ac lacus.")._();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
new file mode 100644
index 0000000..f139cac
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+public class NavBlock extends HtmlBlock {
+
+ @Override protected void render(Block html) {
+ html.
+ div("#nav").
+ h3("Heading1").
+ ul().
+ li("Item 1").
+ li("Item 2").
+ li("...")._().
+ h3("Tools").
+ ul().
+ li().a("/conf", "Configuration")._().
+ li().a("/stacks", "Thread dump")._().
+ li().a("/logs", "Logs")._().
+ li().a("/metrics", "Metrics")._()._()._().
+ div("#themeswitcher")._();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextPage.java
new file mode 100644
index 0000000..b4536ed
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextPage.java
@@ -0,0 +1,32 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import org.apache.hadoop.yarn.webapp.MimeType;
+
+public abstract class TextPage extends TextView {
+
+ protected TextPage() {
+ super(null, MimeType.TEXT);
+ }
+
+ protected TextPage(ViewContext ctx) {
+ super(ctx, MimeType.TEXT);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextView.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextView.java
new file mode 100644
index 0000000..c42584f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextView.java
@@ -0,0 +1,58 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import java.io.PrintWriter;
+
+import org.apache.hadoop.yarn.webapp.View;
+
+public abstract class TextView extends View {
+
+ private final String contentType;
+
+ protected TextView(ViewContext ctx, String contentType) {
+ super(ctx);
+ this.contentType = contentType;
+ }
+
+ @Override public PrintWriter writer() {
+ response().setContentType(contentType);
+ return super.writer();
+ }
+
+ /**
+ * Print strings as is (no newline, a la php echo).
+ * @param args the strings to print
+ */
+ public void echo(Object... args) {
+ PrintWriter out = writer();
+ for (Object s : args) {
+ out.print(s);
+ }
+ }
+
+ /**
+ * Print strings as a line (new line appended at the end, a la C/Tcl puts).
+ * @param args the strings to print
+ */
+ public void puts(Object... args) {
+ echo(args);
+ writer().println();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java
new file mode 100644
index 0000000..9ee5fa3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java
@@ -0,0 +1,93 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import com.google.inject.Inject;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+
+/**
+ * A reusable, pure-css, cross-browser, left nav, 2 column,
+ * supposedly liquid layout.
+ * Doesn't quite work with resizable themes, kept as an example of the
+ * sad state of css (v2/3 anyway) layout.
+ * @see TwoColumnLayout
+ */
+public class TwoColumnCssLayout extends HtmlPage {
+
+ @Override protected void render(Page.HTML<_> html) {
+ preHead(html);
+ html.
+ title($("title")).
+ link("/static/yarn.css").
+ style(".main { min-height: 100%; height: auto !important; height: 100%;",
+ " margin: 0 auto -4em; border: 0; }",
+ ".footer, .push { height: 4em; clear: both; border: 0 }",
+ ".main.ui-widget-content, .footer.ui-widget-content { border: 0; }",
+ ".cmask { position: relative; clear: both; float: left;",
+ " width: 100%; overflow: hidden; }",
+ ".leftnav .c1right { float: left; width: 200%; position: relative;",
+ " left: 13em; border: 0; /* background: #fff; */ }",
+ ".leftnav .c1wrap { float: right; width: 50%; position: relative;",
+ " right: 13em; padding-bottom: 1em; }",
+ ".leftnav .content { margin: 0 1em 0 14em; position: relative;",
+ " right: 100%; overflow: hidden; }",
+ ".leftnav .nav { float: left; width: 11em; position: relative;",
+ " right: 12em; overflow: hidden; }").
+ _(JQueryUI.class);
+ postHead(html);
+ JQueryUI.jsnotice(html);
+ html.
+ div(".main.ui-widget-content").
+ _(header()).
+ div(".cmask.leftnav").
+ div(".c1right").
+ div(".c1wrap").
+ div(".content").
+ _(content())._()._().
+ div(".nav").
+ _(nav()).
+ div("#themeswitcher")._().
+ div(".push")._()._()._()._()._().
+ div(".footer.ui-widget-content").
+ _(footer())._()._();
+ }
+
+ protected void preHead(Page.HTML<_> html) {
+ }
+
+ protected void postHead(Page.HTML<_> html) {
+ }
+
+ protected Class<? extends SubView> header() {
+ return HeaderBlock.class;
+ }
+
+ protected Class<? extends SubView> content() {
+ return LipsumBlock.class;
+ }
+
+ protected Class<? extends SubView> nav() {
+ return NavBlock.class;
+ }
+
+ protected Class<? extends SubView> footer() {
+ return FooterBlock.class;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java
new file mode 100644
index 0000000..c79e7de
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java
@@ -0,0 +1,102 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import com.google.common.collect.Lists;
+import com.google.inject.Inject;
+import java.util.List;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+import static org.apache.hadoop.yarn.webapp.Params.*;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+
+/**
+ * A simpler two column layout implementation. Works with resizable themes.
+ * @see TwoColumnCssLayout
+ */
+public class TwoColumnLayout extends HtmlPage {
+
+ @Override protected void render(Page.HTML<_> html) {
+ preHead(html);
+ html.
+ title($(TITLE)).
+ link("/static/yarn.css").
+ style("#layout { height: 100%; }",
+ "#layout thead td { height: 3em; }",
+ "#layout #navcell { width: 11em; padding: 0 1em; }",
+ "#layout td.content { padding-top: 0 }",
+ "#layout tbody { vertical-align: top; }",
+ "#layout tfoot td { height: 4em; }").
+ _(JQueryUI.class);
+ postHead(html);
+ JQueryUI.jsnotice(html);
+ html.
+ table("#layout.ui-widget-content").
+ thead().
+ tr().
+ td().$colspan(2).
+ _(header())._()._()._().
+ tfoot().
+ tr().
+ td().$colspan(2).
+ _(footer())._()._()._().
+ tbody().
+ tr().
+ td().$id("navcell").
+ _(nav())._().
+ td().$class("content").
+ _(content())._()._()._()._()._();
+ }
+
+ protected void preHead(Page.HTML<_> html) {
+ }
+
+ protected void postHead(Page.HTML<_> html) {
+ }
+
+ protected Class<? extends SubView> header() {
+ return HeaderBlock.class;
+ }
+
+ protected Class<? extends SubView> content() {
+ return LipsumBlock.class;
+ }
+
+ protected Class<? extends SubView> nav() {
+ return NavBlock.class;
+ }
+
+ protected Class<? extends SubView> footer() {
+ return FooterBlock.class;
+ }
+
+ protected void setTableStyles(Page.HTML<_> html, String tableId,
+ String... innerStyles) {
+ List<String> styles = Lists.newArrayList();
+ styles.add(join('#', tableId, "_paginate span {font-weight:normal}"));
+ styles.add(join('#', tableId, " .progress {width:8em}"));
+ styles.add(join('#', tableId, "_processing {top:-1.5em; font-size:1em;"));
+ styles.add(" color:#000; background:rgba(255, 255, 255, 0.8)}");
+ for (String style : innerStyles) {
+ styles.add(join('#', tableId, " ", style));
+ }
+ html.style(styles.toArray());
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarnprototunnelrpc.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarnprototunnelrpc.proto
new file mode 100644
index 0000000..19fd4f9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarnprototunnelrpc.proto
@@ -0,0 +1,17 @@
+option java_package = "org.apache.hadoop.yarn.ipc";
+option java_outer_classname = "RpcProtos";
+option java_generate_equals_and_hash = true;
+
+import "yarn_protos.proto";
+
+message ProtoSpecificRpcRequest {
+ required string method_name = 1;
+ optional bytes request_proto = 2;
+}
+
+message ProtoSpecificRpcResponse {
+ optional bytes response_proto = 1;
+
+ optional bool is_error = 2;
+ optional YarnRemoteExceptionProto exception = 3;
+}
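
With the standard protoc Java codegen this produces RpcProtos.* builders; a
tunneled request wrapping an arbitrary serialized payload would look roughly
like this (illustrative; the method name is hypothetical):

    RpcProtos.ProtoSpecificRpcRequest request =
        RpcProtos.ProtoSpecificRpcRequest.newBuilder()
            .setMethodName("getApplicationReport")     // hypothetical method
            .setRequestProto(payload.toByteString())   // any proto message
            .build();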
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
new file mode 100644
index 0000000..24224b6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
@@ -0,0 +1,4 @@
+org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo
+org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo
+org.apache.hadoop.yarn.security.SchedulerSecurityInfo
+
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/busy.gif b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/busy.gif
new file mode 100644
index 0000000..058ce1f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/busy.gif
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/css/demo_page.css b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/css/demo_page.css
new file mode 100644
index 0000000..bee7b0d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/css/demo_page.css
@@ -0,0 +1,93 @@
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * General page setup
+ */
+#dt_example {
+ font: 80%/1.45em "Lucida Grande", Verdana, Arial, Helvetica, sans-serif;
+ margin: 0;
+ padding: 0;
+ color: #333;
+ background-color: #fff;
+}
+
+
+#dt_example #container {
+ width: 800px;
+ margin: 30px auto;
+ padding: 0;
+}
+
+
+#dt_example #footer {
+ margin: 50px auto 0 auto;
+ padding: 0;
+}
+
+#dt_example #demo {
+ margin: 30px auto 0 auto;
+}
+
+#dt_example .demo_jui {
+ margin: 30px auto 0 auto;
+}
+
+#dt_example .big {
+ font-size: 1.3em;
+ font-weight: bold;
+ line-height: 1.6em;
+ color: #4E6CA3;
+}
+
+#dt_example .spacer {
+ height: 20px;
+ clear: both;
+}
+
+#dt_example .clear {
+ clear: both;
+}
+
+#dt_example pre {
+ padding: 15px;
+ background-color: #F5F5F5;
+ border: 1px solid #CCCCCC;
+}
+
+#dt_example h1 {
+ margin-top: 2em;
+ font-size: 1.3em;
+ font-weight: normal;
+ line-height: 1.6em;
+ color: #4E6CA3;
+ border-bottom: 1px solid #B0BED9;
+ clear: both;
+}
+
+#dt_example h2 {
+ font-size: 1.2em;
+ font-weight: normal;
+ line-height: 1.6em;
+ color: #4E6CA3;
+ clear: both;
+}
+
+#dt_example a {
+ color: #0063DC;
+ text-decoration: none;
+}
+
+#dt_example a:hover {
+ text-decoration: underline;
+}
+
+#dt_example ul {
+ color: #4E6CA3;
+}
+
+.css_right {
+ float: right;
+}
+
+.css_left {
+ float: left;
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/css/demo_table.css b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/css/demo_table.css
new file mode 100644
index 0000000..3bc0433
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/css/demo_table.css
@@ -0,0 +1,538 @@
+/*
+ * File: demo_table.css
+ * CVS: $Id$
+ * Description: CSS descriptions for DataTables demo pages
+ * Author: Allan Jardine
+ * Created: Tue May 12 06:47:22 BST 2009
+ * Modified: $Date$ by $Author$
+ * Language: CSS
+ * Project: DataTables
+ *
+ * Copyright 2009 Allan Jardine. All Rights Reserved.
+ *
+ * ***************************************************************************
+ * DESCRIPTION
+ *
+ * The styles given here are suitable for the demos that are used with the standard DataTables
+ * distribution (see www.datatables.net). You will most likely wish to modify these styles to
+ * meet the layout requirements of your site.
+ *
+ * Common issues:
+ * 'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
+ * no conflict between the two pagination types. If you want to use full_numbers pagination
+ * ensure that you either have "example_alt_pagination" as a body class name, or better yet,
+ * modify that selector.
+ * Note that the path used for Images is relative. All images are by default located in
+ * ../images/ - relative to this CSS file.
+ */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables features
+ */
+
+.dataTables_wrapper {
+ position: relative;
+ min-height: 302px;
+ clear: both;
+ _height: 302px;
+ zoom: 1; /* Feeling sorry for IE */
+}
+
+.dataTables_processing {
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ width: 250px;
+ height: 30px;
+ margin-left: -125px;
+ margin-top: -15px;
+ padding: 14px 0 2px 0;
+ border: 1px solid #ddd;
+ text-align: center;
+ color: #999;
+ font-size: 14px;
+ background-color: white;
+}
+
+.dataTables_length {
+ width: 40%;
+ float: left;
+}
+
+.dataTables_filter {
+ width: 50%;
+ float: right;
+ text-align: right;
+}
+
+.dataTables_info {
+ width: 60%;
+ float: left;
+}
+
+.dataTables_paginate {
+ width: 44px;
+ * width: 50px;
+ float: right;
+ text-align: right;
+}
+
+/* Pagination nested */
+.paginate_disabled_previous, .paginate_enabled_previous, .paginate_disabled_next, .paginate_enabled_next {
+ height: 19px;
+ width: 19px;
+ margin-left: 3px;
+ float: left;
+}
+
+.paginate_disabled_previous {
+ background-image: url('../images/back_disabled.jpg');
+}
+
+.paginate_enabled_previous {
+ background-image: url('../images/back_enabled.jpg');
+}
+
+.paginate_disabled_next {
+ background-image: url('../images/forward_disabled.jpg');
+}
+
+.paginate_enabled_next {
+ background-image: url('../images/forward_enabled.jpg');
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables display
+ */
+table.display {
+ margin: 0 auto;
+ clear: both;
+ width: 100%;
+
+ /* Note Firefox 3.5 and before have a bug with border-collapse
+ * ( https://bugzilla.mozilla.org/show%5Fbug.cgi?id=155955 )
+ * border-spacing: 0; is one possible option. Conditional-css.com is
+ * useful for this kind of thing
+ *
+ * Further note IE 6/7 has problems when calculating widths with border width.
+ * It subtracts one px relative to the other browsers from the first column, and
+ * adds one to the end...
+ *
+ * If you want that effect I'd suggest setting a border-top/left on th/td's and
+ * then filling in the gaps with other borders.
+ */
+}
+
+table.display thead th {
+ padding: 3px 18px 3px 10px;
+ border-bottom: 1px solid black;
+ font-weight: bold;
+ cursor: pointer;
+ * cursor: hand;
+}
+
+table.display tfoot th {
+ padding: 3px 18px 3px 10px;
+ border-top: 1px solid black;
+ font-weight: bold;
+}
+
+table.display tr.heading2 td {
+ border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+ padding: 3px 10px;
+}
+
+table.display td.center {
+ text-align: center;
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables sorting
+ */
+
+.sorting_asc {
+ background: url('../images/sort_asc.png') no-repeat center right;
+}
+
+.sorting_desc {
+ background: url('../images/sort_desc.png') no-repeat center right;
+}
+
+.sorting {
+ background: url('../images/sort_both.png') no-repeat center right;
+}
+
+.sorting_asc_disabled {
+ background: url('../images/sort_asc_disabled.png') no-repeat center right;
+}
+
+.sorting_desc_disabled {
+ background: url('../images/sort_desc_disabled.png') no-repeat center right;
+}
+
+
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables row classes
+ */
+table.display tr.odd.gradeA {
+ background-color: #ddffdd;
+}
+
+table.display tr.even.gradeA {
+ background-color: #eeffee;
+}
+
+table.display tr.odd.gradeC {
+ background-color: #ddddff;
+}
+
+table.display tr.even.gradeC {
+ background-color: #eeeeff;
+}
+
+table.display tr.odd.gradeX {
+ background-color: #ffdddd;
+}
+
+table.display tr.even.gradeX {
+ background-color: #ffeeee;
+}
+
+table.display tr.odd.gradeU {
+ background-color: #ddd;
+}
+
+table.display tr.even.gradeU {
+ background-color: #eee;
+}
+
+
+tr.odd {
+ background-color: #E2E4FF;
+}
+
+tr.even {
+ background-color: white;
+}
+
+
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Misc
+ */
+.dataTables_scroll {
+ clear: both;
+}
+
+.dataTables_scrollBody {
+ *margin-top: -1px;
+}
+
+.top, .bottom {
+ padding: 15px;
+ background-color: #F5F5F5;
+ border: 1px solid #CCCCCC;
+}
+
+.top .dataTables_info {
+ float: none;
+}
+
+.clear {
+ clear: both;
+}
+
+.dataTables_empty {
+ text-align: center;
+}
+
+tfoot input {
+ margin: 0.5em 0;
+ width: 100%;
+ color: #444;
+}
+
+tfoot input.search_init {
+ color: #999;
+}
+
+td.group {
+ background-color: #d1cfd0;
+ border-bottom: 2px solid #A19B9E;
+ border-top: 2px solid #A19B9E;
+}
+
+td.details {
+ background-color: #d1cfd0;
+ border: 2px solid #A19B9E;
+}
+
+
+.example_alt_pagination div.dataTables_info {
+ width: 40%;
+}
+
+.paging_full_numbers {
+ width: 400px;
+ height: 22px;
+ line-height: 22px;
+}
+
+.paging_full_numbers span.paginate_button,
+ .paging_full_numbers span.paginate_active {
+ border: 1px solid #aaa;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ padding: 2px 5px;
+ margin: 0 3px;
+ cursor: pointer;
+ *cursor: hand;
+}
+
+.paging_full_numbers span.paginate_button {
+ background-color: #ddd;
+}
+
+.paging_full_numbers span.paginate_button:hover {
+ background-color: #ccc;
+}
+
+.paging_full_numbers span.paginate_active {
+ background-color: #99B3FF;
+}
+
+table.display tr.even.row_selected td {
+ background-color: #B0BED9;
+}
+
+table.display tr.odd.row_selected td {
+ background-color: #9FAFD1;
+}
+
+
+/*
+ * Sorting classes for columns
+ */
+/* For the standard odd/even */
+tr.odd td.sorting_1 {
+ background-color: #D3D6FF;
+}
+
+tr.odd td.sorting_2 {
+ background-color: #DADCFF;
+}
+
+tr.odd td.sorting_3 {
+ background-color: #E0E2FF;
+}
+
+tr.even td.sorting_1 {
+ background-color: #EAEBFF;
+}
+
+tr.even td.sorting_2 {
+ background-color: #F2F3FF;
+}
+
+tr.even td.sorting_3 {
+ background-color: #F9F9FF;
+}
+
+
+/* For the Conditional-CSS grading rows */
+/*
+ Colour calculations (based off the main row colours)
+ Level 1:
+ dd > c4
+ ee > d5
+ Level 2:
+ dd > d1
+ ee > e2
+ */
+tr.odd.gradeA td.sorting_1 {
+ background-color: #c4ffc4;
+}
+
+tr.odd.gradeA td.sorting_2 {
+ background-color: #d1ffd1;
+}
+
+tr.odd.gradeA td.sorting_3 {
+ background-color: #d1ffd1;
+}
+
+tr.even.gradeA td.sorting_1 {
+ background-color: #d5ffd5;
+}
+
+tr.even.gradeA td.sorting_2 {
+ background-color: #e2ffe2;
+}
+
+tr.even.gradeA td.sorting_3 {
+ background-color: #e2ffe2;
+}
+
+tr.odd.gradeC td.sorting_1 {
+ background-color: #c4c4ff;
+}
+
+tr.odd.gradeC td.sorting_2 {
+ background-color: #d1d1ff;
+}
+
+tr.odd.gradeC td.sorting_3 {
+ background-color: #d1d1ff;
+}
+
+tr.even.gradeC td.sorting_1 {
+ background-color: #d5d5ff;
+}
+
+tr.even.gradeC td.sorting_2 {
+ background-color: #e2e2ff;
+}
+
+tr.even.gradeC td.sorting_3 {
+ background-color: #e2e2ff;
+}
+
+tr.odd.gradeX td.sorting_1 {
+ background-color: #ffc4c4;
+}
+
+tr.odd.gradeX td.sorting_2 {
+ background-color: #ffd1d1;
+}
+
+tr.odd.gradeX td.sorting_3 {
+ background-color: #ffd1d1;
+}
+
+tr.even.gradeX td.sorting_1 {
+ background-color: #ffd5d5;
+}
+
+tr.even.gradeX td.sorting_2 {
+ background-color: #ffe2e2;
+}
+
+tr.even.gradeX td.sorting_3 {
+ background-color: #ffe2e2;
+}
+
+tr.odd.gradeU td.sorting_1 {
+ background-color: #c4c4c4;
+}
+
+tr.odd.gradeU td.sorting_2 {
+ background-color: #d1d1d1;
+}
+
+tr.odd.gradeU td.sorting_3 {
+ background-color: #d1d1d1;
+}
+
+tr.even.gradeU td.sorting_1 {
+ background-color: #d5d5d5;
+}
+
+tr.even.gradeU td.sorting_2 {
+ background-color: #e2e2e2;
+}
+
+tr.even.gradeU td.sorting_3 {
+ background-color: #e2e2e2;
+}
+
+
+/*
+ * Row highlighting example
+ */
+.ex_highlight #example tbody tr.even:hover, #example tbody tr.even td.highlighted {
+ background-color: #ECFFB3;
+}
+
+.ex_highlight #example tbody tr.odd:hover, #example tbody tr.odd td.highlighted {
+ background-color: #E6FF99;
+}
+
+.ex_highlight_row #example tr.even:hover {
+ background-color: #ECFFB3;
+}
+
+.ex_highlight_row #example tr.even:hover td.sorting_1 {
+ background-color: #DDFF75;
+}
+
+.ex_highlight_row #example tr.even:hover td.sorting_2 {
+ background-color: #E7FF9E;
+}
+
+.ex_highlight_row #example tr.even:hover td.sorting_3 {
+ background-color: #E2FF89;
+}
+
+.ex_highlight_row #example tr.odd:hover {
+ background-color: #E6FF99;
+}
+
+.ex_highlight_row #example tr.odd:hover td.sorting_1 {
+ background-color: #D6FF5C;
+}
+
+.ex_highlight_row #example tr.odd:hover td.sorting_2 {
+ background-color: #E0FF84;
+}
+
+.ex_highlight_row #example tr.odd:hover td.sorting_3 {
+ background-color: #DBFF70;
+}
+
+
+/*
+ * KeyTable
+ */
+table.KeyTable td {
+ border: 3px solid transparent;
+}
+
+table.KeyTable td.focus {
+ border: 3px solid #3366FF;
+}
+
+table.display tr.gradeA {
+ background-color: #eeffee;
+}
+
+table.display tr.gradeC {
+ background-color: #ddddff;
+}
+
+table.display tr.gradeX {
+ background-color: #ffdddd;
+}
+
+table.display tr.gradeU {
+ background-color: #ddd;
+}
+
+div.box {
+ height: 100px;
+ padding: 10px;
+ overflow: auto;
+ border: 1px solid #8080FF;
+ background-color: #E5E5FF;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/css/jui-dt.css b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/css/jui-dt.css
new file mode 100644
index 0000000..89bd2f0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/css/jui-dt.css
@@ -0,0 +1,322 @@
+/*
+ * File: demo_table_jui.css
+ * CVS: $Id$
+ * Description: CSS descriptions for DataTables demo pages
+ * Author: Allan Jardine
+ * Created: Tue May 12 06:47:22 BST 2009
+ * Modified: $Date$ by $Author$
+ * Language: CSS
+ * Project: DataTables
+ *
+ * Copyright 2009 Allan Jardine. All Rights Reserved.
+ *
+ * ***************************************************************************
+ * DESCRIPTION
+ *
+ * The styles given here are suitable for the demos that are used with the standard DataTables
+ * distribution (see www.datatables.net). You will most likely wish to modify these styles to
+ * meet the layout requirements of your site.
+ *
+ * Common issues:
+ * 'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
+ * no conflict between the two pagination types. If you want to use full_numbers pagination
+ * ensure that you either have "example_alt_pagination" as a body class name, or better yet,
+ * modify that selector.
+ * Note that the path used for Images is relative. All images are by default located in
+ * ../images/ - relative to this CSS file.
+ */
+
+
+/*
+ * jQuery UI specific styling
+ */
+
+.paging_two_button .ui-button {
+ float: left;
+ cursor: pointer;
+ * cursor: hand;
+}
+
+.paging_full_numbers .ui-button {
+ padding: 2px 6px;
+ margin: 0;
+ cursor: pointer;
+ * cursor: hand;
+}
+
+.ui-buttonset .ui-button {
+ margin-right: -0.1em !important;
+}
+
+.paging_full_numbers {
+ width: 350px !important;
+}
+
+.ui-toolbar {
+ padding: 5px;
+}
+
+.dataTables_paginate {
+ width: auto;
+}
+
+.dataTables_info {
+ padding-top: 3px;
+}
+
+table.display thead th {
+ padding: 3px 0px 3px 10px;
+ cursor: pointer;
+ * cursor: hand;
+}
+
+div.dataTables_wrapper .ui-widget-header {
+ font-weight: normal;
+}
+
+
+/*
+ * Sort arrow icon positioning
+ */
+table.display thead th div.DataTables_sort_wrapper {
+ position: relative;
+ padding-right: 20px;
+}
+
+table.display thead th div.DataTables_sort_wrapper span {
+ position: absolute;
+ top: 50%;
+ margin-top: -8px;
+ right: 0;
+}
+
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * Everything below this line is the same as demo_table.css. This file is
+ * required for 'cleanliness' of the markup
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables features
+ */
+
+.dataTables_wrapper {
+ position: relative;
+ min-height: 302px;
+ _height: 302px;
+ clear: both;
+}
+
+.dataTables_processing {
+ position: absolute;
+ top: 0px;
+ left: 50%;
+ width: 250px;
+ margin-left: -125px;
+ border: 1px solid #ddd;
+ text-align: center;
+ color: #999;
+ font-size: 11px;
+ padding: 2px 0;
+}
+
+.dataTables_length {
+ width: 40%;
+ float: left;
+}
+
+.dataTables_filter {
+ width: 50%;
+ float: right;
+ text-align: right;
+}
+
+.dataTables_info {
+ width: 50%;
+ float: left;
+}
+
+.dataTables_paginate {
+ float: right;
+ text-align: right;
+}
+
+/* Pagination nested */
+.paginate_disabled_previous, .paginate_enabled_previous, .paginate_disabled_next, .paginate_enabled_next {
+ height: 19px;
+ width: 19px;
+ margin-left: 3px;
+ float: left;
+}
+
+.paginate_disabled_previous {
+ background-image: url('../images/back_disabled.jpg');
+}
+
+.paginate_enabled_previous {
+ background-image: url('../images/back_enabled.jpg');
+}
+
+.paginate_disabled_next {
+ background-image: url('../images/forward_disabled.jpg');
+}
+
+.paginate_enabled_next {
+ background-image: url('../images/forward_enabled.jpg');
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables display
+ */
+table.display {
+ margin: 0 auto;
+ width: 100%;
+ clear: both;
+ border-collapse: collapse;
+}
+
+table.display tfoot th {
+ padding: 3px 0px 3px 10px;
+ font-weight: normal;
+}
+
+table.display tr.heading2 td {
+ border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+ padding: 3px 10px;
+}
+
+table.display td.center {
+ text-align: center;
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables sorting
+ */
+
+.sorting_asc {
+ background: url('../images/sort_asc.jpg') no-repeat center right;
+}
+
+.sorting_desc {
+ background: url('../images/sort_desc.jpg') no-repeat center right;
+}
+
+.sorting {
+ background: url('../images/sort_both.jpg') no-repeat center right;
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Misc
+ */
+.dataTables_scroll {
+ clear: both;
+}
+
+.top, .bottom {
+ padding: 15px;
+ background-color: #F5F5F5;
+ border: 1px solid #CCCCCC;
+}
+
+.top .dataTables_info {
+ float: none;
+}
+
+.clear {
+ clear: both;
+}
+
+.dataTables_empty {
+ text-align: center;
+}
+
+tfoot input {
+ margin: 0.5em 0;
+ width: 100%;
+ color: #444;
+}
+
+tfoot input.search_init {
+ color: #999;
+}
+
+td.group {
+ background-color: #d1cfd0;
+ border-bottom: 2px solid #A19B9E;
+ border-top: 2px solid #A19B9E;
+}
+
+td.details {
+ background-color: #d1cfd0;
+ border: 2px solid #A19B9E;
+}
+
+
+.example_alt_pagination div.dataTables_info {
+ width: 40%;
+}
+
+.paging_full_numbers span.paginate_button,
+ .paging_full_numbers span.paginate_active {
+ border: 1px solid #aaa;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ padding: 2px 5px;
+ margin: 0 3px;
+ cursor: pointer;
+ *cursor: hand;
+}
+
+.paging_full_numbers span.paginate_button {
+ background-color: #ddd;
+}
+
+.paging_full_numbers span.paginate_button:hover {
+ background-color: #ccc;
+}
+
+.paging_full_numbers span.paginate_active {
+ background-color: #99B3FF;
+}
+
+table.display tr.even.row_selected td {
+ background-color: #B0BED9;
+}
+
+table.display tr.odd.row_selected td {
+ background-color: #9FAFD1;
+}
+
+/* Striping */
+tr.odd { background: rgba(255, 255, 255, 0.1); }
+tr.even { background: rgba(0, 0, 255, 0.05); }
+
+
+/*
+ * Sorting classes for columns
+ */
+tr.odd td.sorting_1 { background: rgba(0, 0, 0, 0.03); }
+tr.odd td.sorting_2 { background: rgba(0, 0, 0, 0.02); }
+tr.odd td.sorting_3 { background: rgba(0, 0, 0, 0.02); }
+tr.even td.sorting_1 { background: rgba(0, 0, 0, 0.08); }
+tr.even td.sorting_2 { background: rgba(0, 0, 0, 0.06); }
+tr.even td.sorting_3 { background: rgba(0, 0, 0, 0.06); }
+
+.css_left { position: relative; float: left; }
+.css_right { position: relative; float: right; }
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/Sorting icons.psd b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/Sorting icons.psd
new file mode 100644
index 0000000..53b2e06
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/Sorting icons.psd
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/back_disabled.jpg b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/back_disabled.jpg
new file mode 100644
index 0000000..1e73a54
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/back_disabled.jpg
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/back_enabled.jpg b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/back_enabled.jpg
new file mode 100644
index 0000000..a6d764c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/back_enabled.jpg
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/favicon.ico b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/favicon.ico
new file mode 100644
index 0000000..6eeaa2a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/favicon.ico
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/forward_disabled.jpg b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/forward_disabled.jpg
new file mode 100644
index 0000000..28a9dc5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/forward_disabled.jpg
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/forward_enabled.jpg b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/forward_enabled.jpg
new file mode 100644
index 0000000..598c075
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/forward_enabled.jpg
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_asc.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_asc.png
new file mode 100644
index 0000000..a56d0e2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_asc.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_asc_disabled.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_asc_disabled.png
new file mode 100644
index 0000000..b7e621e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_asc_disabled.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_both.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_both.png
new file mode 100644
index 0000000..839ac4b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_both.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_desc.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_desc.png
new file mode 100644
index 0000000..90b2951
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_desc.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_desc_disabled.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_desc_disabled.png
new file mode 100644
index 0000000..2409653
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/images/sort_desc_disabled.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/js/jquery.dataTables.min.js.gz b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/js/jquery.dataTables.min.js.gz
new file mode 100644
index 0000000..cff03cf8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.7.5/js/jquery.dataTables.min.js.gz
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/hadoop-st.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/hadoop-st.png
new file mode 100644
index 0000000..b481c04
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/hadoop-st.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js.gz b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js.gz
new file mode 100644
index 0000000..2aac85f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js.gz
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_black_matte.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_black_matte.png
new file mode 100644
index 0000000..182cc0e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_black_matte.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_black_tie.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_black_tie.png
new file mode 100644
index 0000000..4ea6693
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_black_tie.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_blitzer.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_blitzer.png
new file mode 100644
index 0000000..5dde122
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_blitzer.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_cupertino.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_cupertino.png
new file mode 100644
index 0000000..0d9f11a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_cupertino.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_dark_hive.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_dark_hive.png
new file mode 100644
index 0000000..66ab870
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_dark_hive.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_dot_luv.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_dot_luv.png
new file mode 100644
index 0000000..2f7bf69
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_dot_luv.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_eggplant.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_eggplant.png
new file mode 100644
index 0000000..92e3d5a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_eggplant.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_excite_bike.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_excite_bike.png
new file mode 100644
index 0000000..5f58e82
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_excite_bike.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_flick.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_flick.png
new file mode 100644
index 0000000..4baa549f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_flick.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_hot_sneaks.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_hot_sneaks.png
new file mode 100644
index 0000000..cde01b3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_hot_sneaks.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_humanity.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_humanity.png
new file mode 100644
index 0000000..891b39e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_humanity.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_le_frog.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_le_frog.png
new file mode 100644
index 0000000..80d0d8b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_le_frog.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_mint_choco.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_mint_choco.png
new file mode 100644
index 0000000..97d2c87
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_mint_choco.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_overcast.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_overcast.png
new file mode 100644
index 0000000..b1e5dcc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_overcast.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_pepper_grinder.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_pepper_grinder.png
new file mode 100644
index 0000000..97b5d73
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_pepper_grinder.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_smoothness.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_smoothness.png
new file mode 100644
index 0000000..fa0f8fe
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_smoothness.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_south_street.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_south_street.png
new file mode 100644
index 0000000..1545fd0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_south_street.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_start_menu.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_start_menu.png
new file mode 100644
index 0000000..abd9b67
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_start_menu.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_sunny.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_sunny.png
new file mode 100644
index 0000000..32867b9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_sunny.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_swanky_purse.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_swanky_purse.png
new file mode 100644
index 0000000..4ae2a33
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_swanky_purse.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_trontastic.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_trontastic.png
new file mode 100644
index 0000000..f77b455
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_trontastic.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_ui_dark.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_ui_dark.png
new file mode 100644
index 0000000..80e0fe8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_ui_dark.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_ui_light.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_ui_light.png
new file mode 100644
index 0000000..04544e3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_ui_light.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_windoze.png b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_windoze.png
new file mode 100644
index 0000000..2b8c3bd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/theme/theme_90_windoze.png
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/themeswitcher.js.gz b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/themeswitcher.js.gz
new file mode 100644
index 0000000..7814135
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/themeswitcher.js.gz
Binary files differ
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css
new file mode 100644
index 0000000..e0e40b5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css
@@ -0,0 +1,28 @@
+/* Styles for YARN */
+* { margin: 0; border: 0 }
+html, body { height: 100% }
+body { padding: 0; font: 90% sans-serif }
+a { text-decoration: none }
+a:hover { text-decoration: underline }
+.content { padding-right: 1em }
+.content h1, .content h2, .content h3 { margin: 0 0 0.3em; font-weight: normal }
+table { border-collapse: collapse; border-spacing: 0; width: 100% }
+table br { display: none }
+table.info th { text-align: right }
+.info-wrap { margin: 0 0 1em }
+th, td { padding: 0.2em 0.5em 0 }
+td.table { padding: 0 }
+.ui-dialog, .shadow {
+ -moz-box-shadow: 0 8px 38px #000;
+ -webkit-box-shadow: 0 8px 38px #000;
+ box-shadow: 0 8px 38px #000 }
+/* styles for common objects */
+#logo { float: left; position: relative; line-height: 0.5em; top: -1.3em }
+#user { float: right; position: relative; top: -1.5em; font-size: 0.77em }
+#header { padding: 1.5em 0.5em; text-align: center }
+#nav h3 { padding: 0 0 0 1.6em }
+#nav ul {
+  padding: 0.3em 1em 0.8em 2em; list-style: none;
+ line-height: 1.2em; font-size: 90% }
+#themeswitcher { margin: 1em 0.25em }
+#footer { padding: 1em; text-align: center }
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js
new file mode 100644
index 0000000..4d00706
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js
@@ -0,0 +1,52 @@
+if (!jQuery.fn.dataTableExt.fnVersionCheck("1.7.5")) {
+  alert("These plugins require DataTables 1.7.5+");
+}
+
+// 'title-numeric' sort type
+jQuery.fn.dataTableExt.oSort['title-numeric-asc'] = function(a,b) {
+ var x = a.match(/title=["']?(-?\d+\.?\d*)/)[1];
+ var y = b.match(/title=["']?(-?\d+\.?\d*)/)[1];
+ x = parseFloat( x );
+ y = parseFloat( y );
+ return ((x < y) ? -1 : ((x > y) ? 1 : 0));
+};
+
+jQuery.fn.dataTableExt.oSort['title-numeric-desc'] = function(a,b) {
+ var x = a.match(/title=["']?(-?\d+\.?\d*)/)[1];
+ var y = b.match(/title=["']?(-?\d+\.?\d*)/)[1];
+ x = parseFloat( x );
+ y = parseFloat( y );
+ return ((x < y) ? 1 : ((x > y) ? -1 : 0));
+};
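+
+// Usage sketch (assumed markup, for illustration only): declare the column
+// type at init time and carry the sort key in each cell's title attribute:
+//   $('#example').dataTable({ aoColumns: [ { sType: 'title-numeric' }, null ] });
+//   <td><span title="1024">1 KB</span></td>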
+
+jQuery.fn.dataTableExt.oApi.fnSetFilteringDelay = function ( oSettings, iDelay ) {
+ var
+ _that = this,
+ iDelay = (typeof iDelay == 'undefined') ? 250 : iDelay;
+
+ this.each( function ( i ) {
+ $.fn.dataTableExt.iApiIndex = i;
+ var
+ $this = this,
+ oTimerId = null,
+ sPreviousSearch = null,
+ anControl = $( 'input', _that.fnSettings().aanFeatures.f );
+
+ anControl.unbind( 'keyup' ).bind( 'keyup', function() {
+ var $$this = $this;
+
+ if (sPreviousSearch === null || sPreviousSearch != anControl.val()) {
+ window.clearTimeout(oTimerId);
+ sPreviousSearch = anControl.val();
+ oSettings.oApi._fnProcessingDisplay(oSettings, true);
+ oTimerId = window.setTimeout(function() {
+ $.fn.dataTableExt.iApiIndex = i;
+ _that.fnFilter( anControl.val() );
+ oSettings.oApi._fnProcessingDisplay(oSettings, false);
+ }, iDelay);
+ }
+ });
+ return this;
+ } );
+ return this;
+};
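+
+// Usage sketch: oApi plugins are exposed on DataTables instances, so callers
+// can debounce the filter box with e.g.
+//   var oTable = $('#example').dataTable();
+//   oTable.fnSetFilteringDelay(500); // filter 500ms after the last keystroke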
diff --git a/mapreduce/src/test/empty-file b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/test/.keep
similarity index 100%
copy from mapreduce/src/test/empty-file
copy to hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/test/.keep
diff --git a/mapreduce/src/test/empty-file b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/yarn/.keep
similarity index 100%
copy from mapreduce/src/test/empty-file
copy to hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/yarn/.keep
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java
new file mode 100644
index 0000000..378aaca
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java
@@ -0,0 +1,180 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn;
+
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.util.Records;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+
+/**
+ * Utilities to generate fake test apps
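+ * (e.g. {@code List<ApplicationReport> apps = MockApps.genApps(10);} cycles
+ * through the canned names, users, queues and states defined below)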
+ */
+public class MockApps {
+ static final Iterator<String> NAMES = Iterators.cycle("SleepJob",
+ "RandomWriter", "TeraSort", "TeraGen", "PigLatin", "WordCount",
+ "I18nApp<☯>");
+ static final Iterator<String> USERS = Iterators.cycle("dorothy", "tinman",
+ "scarecrow", "glinda", "nikko", "toto", "winkie", "zeke", "gulch");
+ static final Iterator<ApplicationState> STATES = Iterators.cycle(
+ ApplicationState.values());
+ static final Iterator<String> QUEUES = Iterators.cycle("a.a1", "a.a2",
+ "b.b1", "b.b2", "b.b3", "c.c1.c11", "c.c1.c12", "c.c1.c13",
+ "c.c2", "c.c3", "c.c4");
+ static final long TS = System.currentTimeMillis();
+
+ public static String newAppName() {
+ synchronized(NAMES) {
+ return NAMES.next();
+ }
+ }
+
+ public static String newUserName() {
+ synchronized(USERS) {
+ return USERS.next();
+ }
+ }
+
+ public static String newQueue() {
+ synchronized(QUEUES) {
+ return QUEUES.next();
+ }
+ }
+
+ public static List<ApplicationReport> genApps(int n) {
+ List<ApplicationReport> list = Lists.newArrayList();
+ for (int i = 0; i < n; ++i) {
+ list.add(newApp(i));
+ }
+ return list;
+ }
+
+ public static ApplicationReport newApp(int i) {
+ final ApplicationId id = newAppID(i);
+ final ApplicationState state = newAppState();
+ final String user = newUserName();
+ final String name = newAppName();
+ final String queue = newQueue();
+ return new ApplicationReport() {
+ @Override public ApplicationId getApplicationId() { return id; }
+ @Override public String getUser() { return user; }
+ @Override public String getName() { return name; }
+ @Override public ApplicationState getState() { return state; }
+ @Override public String getQueue() { return queue; }
+ @Override public String getTrackingUrl() { return ""; }
+ @Override
+ public void setApplicationId(ApplicationId applicationId) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public void setTrackingUrl(String url) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public void setName(String name) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public void setQueue(String queue) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public void setState(ApplicationState state) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public void setUser(String user) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public String getDiagnostics() {
+ // TODO Auto-generated method stub
+ return null;
+ }
+ @Override
+ public void setDiagnostics(String diagnostics) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public String getHost() {
+ // TODO Auto-generated method stub
+ return null;
+ }
+ @Override
+ public void setHost(String host) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public int getRpcPort() {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+ @Override
+ public void setRpcPort(int rpcPort) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public String getClientToken() {
+ // TODO Auto-generated method stub
+ return null;
+ }
+ @Override
+ public void setClientToken(String clientToken) {
+ // TODO Auto-generated method stub
+
+ }
+ };
+ }
+
+ public static ApplicationId newAppID(int i) {
+ ApplicationId id = Records.newRecord(ApplicationId.class);
+ id.setClusterTimestamp(TS);
+ id.setId(i);
+ return id;
+ }
+
+ public static ApplicationAttemptId newAppAttemptID(ApplicationId appId, int i) {
+ ApplicationAttemptId id = Records.newRecord(ApplicationAttemptId.class);
+ id.setApplicationId(appId);
+ id.setAttemptId(i);
+ return id;
+ }
+
+ public static ApplicationState newAppState() {
+ synchronized(STATES) {
+ return STATES.next();
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
new file mode 100644
index 0000000..28ed4cf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
@@ -0,0 +1,152 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn;
+
+import java.net.InetSocketAddress;
+
+import junit.framework.Assert;
+
+import org.apache.avro.ipc.Server;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.factory.providers.YarnRemoteExceptionFactoryProvider;
+import org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.junit.Test;
+
+public class TestRPC {
+
+ private static final String EXCEPTION_MSG = "test error";
+ private static final String EXCEPTION_CAUSE = "exception cause";
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+// @Test
+// public void testAvroRPC() throws Exception {
+// test(AvroYarnRPC.class.getName());
+// }
+//
+// @Test
+// public void testHadoopNativeRPC() throws Exception {
+// test(HadoopYarnRPC.class.getName());
+// }
+
+ @Test
+ public void testHadoopProtoRPC() throws Exception {
+ test(HadoopYarnProtoRPC.class.getName());
+ }
+
+ private void test(String rpcClass) throws Exception {
+ Configuration conf = new Configuration();
+ conf.set(YarnRPC.RPC_CLASSNAME, rpcClass);
+ YarnRPC rpc = YarnRPC.create(conf);
+ String bindAddr = "localhost:0";
+ InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
+ Server server = rpc.getServer(ContainerManager.class,
+ new DummyContainerManager(), addr, conf, null, 1);
+ server.start();
+ ContainerManager proxy = (ContainerManager)
+ rpc.getProxy(ContainerManager.class,
+ NetUtils.createSocketAddr("localhost:" + server.getPort()), conf);
+ ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ containerLaunchContext.setUser("dummy-user");
+ containerLaunchContext.setContainerId(recordFactory.newRecordInstance(ContainerId.class));
+ containerLaunchContext.getContainerId().setAppId(recordFactory.newRecordInstance(ApplicationId.class));
+ containerLaunchContext.getContainerId().getAppId().setId(0);
+ containerLaunchContext.getContainerId().setId(100);
+ containerLaunchContext.setResource(recordFactory.newRecordInstance(Resource.class));
+// containerLaunchContext.env = new HashMap<CharSequence, CharSequence>();
+// containerLaunchContext.command = new ArrayList<CharSequence>();
+
+ StartContainerRequest scRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
+ scRequest.setContainerLaunchContext(containerLaunchContext);
+ proxy.startContainer(scRequest);
+
+ GetContainerStatusRequest gcsRequest = recordFactory.newRecordInstance(GetContainerStatusRequest.class);
+ gcsRequest.setContainerId(containerLaunchContext.getContainerId());
+ GetContainerStatusResponse response = proxy.getContainerStatus(gcsRequest);
+ ContainerStatus status = response.getStatus();
+
+ //test remote exception
+ boolean exception = false;
+ try {
+ StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
+ stopRequest.setContainerId(containerLaunchContext.getContainerId());
+ proxy.stopContainer(stopRequest);
+ } catch (YarnRemoteException e) {
+ exception = true;
+ System.err.println(e.getMessage());
+ System.err.println(e.getCause().getMessage());
+ Assert.assertTrue(EXCEPTION_MSG.equals(e.getMessage()));
+ Assert.assertTrue(EXCEPTION_CAUSE.equals(e.getCause().getMessage()));
+ System.out.println("Test Exception is " + RPCUtil.toString(e));
+ }
+ Assert.assertTrue(exception);
+
+ server.close();
+ Assert.assertNotNull(status);
+    Assert.assertEquals(ContainerState.RUNNING, status.getState());
+ }
+
+ public class DummyContainerManager implements ContainerManager {
+
+ private ContainerStatus status = null;
+
+ @Override
+ public GetContainerStatusResponse getContainerStatus(GetContainerStatusRequest request) throws YarnRemoteException {
+ GetContainerStatusResponse response = recordFactory.newRecordInstance(GetContainerStatusResponse.class);
+ response.setStatus(status);
+ return response;
+ }
+
+ @Override
+ public StartContainerResponse startContainer(StartContainerRequest request) throws YarnRemoteException {
+ ContainerLaunchContext container = request.getContainerLaunchContext();
+ StartContainerResponse response = recordFactory.newRecordInstance(StartContainerResponse.class);
+ status = recordFactory.newRecordInstance(ContainerStatus.class);
+ status.setState(ContainerState.RUNNING);
+ status.setContainerId(container.getContainerId());
+ status.setExitStatus(String.valueOf(0));
+ return response;
+ }
+
+ @Override
+ public StopContainerResponse stopContainer(StopContainerRequest request) throws YarnRemoteException {
+ Exception e = new Exception(EXCEPTION_MSG,
+ new Exception(EXCEPTION_CAUSE));
+ throw YarnRemoteExceptionFactoryProvider.getYarnRemoteExceptionFactory(null).createYarnRemoteException(e);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPCFactories.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPCFactories.java
new file mode 100644
index 0000000..deab52d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPCFactories.java
@@ -0,0 +1,111 @@
+package org.apache.hadoop.yarn;
+
+import java.net.InetSocketAddress;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
+import org.junit.Test;
+
+public class TestRPCFactories {
+
+ @Test
+ public void test() {
+ testPbServerFactory();
+
+ testPbClientFactory();
+ }
+
+ private void testPbServerFactory() {
+ InetSocketAddress addr = new InetSocketAddress(0);
+ Configuration conf = new Configuration();
+ AMRMProtocol instance = new AMRMProtocolTestImpl();
+ Server server = null;
+ try {
+ server =
+ RpcServerFactoryPBImpl.get().getServer(
+ AMRMProtocol.class, instance, addr, conf, null, 1);
+ server.start();
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to create server");
+ } finally {
+ if (server != null) {
+ server.stop();
+ }
+ }
+ }
+
+
+ private void testPbClientFactory() {
+ InetSocketAddress addr = new InetSocketAddress(0);
+ System.err.println(addr.getHostName() + addr.getPort());
+ Configuration conf = new Configuration();
+ AMRMProtocol instance = new AMRMProtocolTestImpl();
+ Server server = null;
+ try {
+ server =
+ RpcServerFactoryPBImpl.get().getServer(
+ AMRMProtocol.class, instance, addr, conf, null, 1);
+ server.start();
+ System.err.println(server.getListenerAddress());
+ System.err.println(NetUtils.getConnectAddress(server));
+
+ AMRMProtocol amrmClient = null;
+ try {
+ amrmClient = (AMRMProtocol) RpcClientFactoryPBImpl.get().getClient(AMRMProtocol.class, 1, NetUtils.getConnectAddress(server), conf);
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to create client");
+ }
+
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to create server");
+ } finally {
+ if (server != null) {
+ server.stop();
+ }
+ }
+ }
+
+ public class AMRMProtocolTestImpl implements AMRMProtocol {
+
+ @Override
+ public RegisterApplicationMasterResponse registerApplicationMaster(
+ RegisterApplicationMasterRequest request) throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public FinishApplicationMasterResponse finishApplicationMaster(
+ FinishApplicationMasterRequest request) throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public AllocateResponse allocate(AllocateRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRecordFactory.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRecordFactory.java
new file mode 100644
index 0000000..8c85b13
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRecordFactory.java
@@ -0,0 +1,36 @@
+package org.apache.hadoop.yarn;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.impl.pb.AMResponsePBImpl;
+import org.junit.Test;
+
+public class TestRecordFactory {
+
+ @Test
+ public void testPbRecordFactory() {
+ RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
+
+ try {
+ AMResponse response = pbRecordFactory.newRecordInstance(AMResponse.class);
+ Assert.assertEquals(AMResponsePBImpl.class, response.getClass());
+ } catch (YarnException e) {
+ e.printStackTrace();
+      Assert.fail("Failed to create record");
+ }
+
+ try {
+ AllocateRequest response = pbRecordFactory.newRecordInstance(AllocateRequest.class);
+ Assert.assertEquals(AllocateRequestPBImpl.class, response.getClass());
+ } catch (YarnException e) {
+ e.printStackTrace();
+      Assert.fail("Failed to create record");
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRpcFactoryProvider.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRpcFactoryProvider.java
new file mode 100644
index 0000000..c8c332e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRpcFactoryProvider.java
@@ -0,0 +1,54 @@
+package org.apache.hadoop.yarn;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.factories.RpcClientFactory;
+import org.apache.hadoop.yarn.factories.RpcServerFactory;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
+import org.apache.hadoop.yarn.factory.providers.RpcFactoryProvider;
+import org.junit.Test;
+
+public class TestRpcFactoryProvider {
+
+ @Test
+ public void testFactoryProvider() {
+ Configuration conf = new Configuration();
+ RpcClientFactory clientFactory = null;
+ RpcServerFactory serverFactory = null;
+
+
+ clientFactory = RpcFactoryProvider.getClientFactory(conf);
+ serverFactory = RpcFactoryProvider.getServerFactory(conf);
+ Assert.assertEquals(RpcClientFactoryPBImpl.class, clientFactory.getClass());
+ Assert.assertEquals(RpcServerFactoryPBImpl.class, serverFactory.getClass());
+
+ conf.set(RpcFactoryProvider.RPC_SERIALIZER_KEY, "writable");
+ try {
+ clientFactory = RpcFactoryProvider.getClientFactory(conf);
+ Assert.fail("Expected an exception - unknown serializer");
+ } catch (YarnException e) {
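+      // expected: "writable" is not a supported serializer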
+ }
+ try {
+ serverFactory = RpcFactoryProvider.getServerFactory(conf);
+ Assert.fail("Expected an exception - unknown serializer");
+ } catch (YarnException e) {
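+      // expected: "writable" is not a supported serializer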
+ }
+
+ conf = new Configuration();
+ conf.set(RpcFactoryProvider.RPC_CLIENT_FACTORY_CLASS_KEY, "NonExistantClass");
+ conf.set(RpcFactoryProvider.RPC_SERVER_FACTORY_CLASS_KEY, RpcServerFactoryPBImpl.class.getName());
+
+ try {
+ clientFactory = RpcFactoryProvider.getClientFactory(conf);
+ Assert.fail("Expected an exception - unknown class");
+ } catch (YarnException e) {
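+      // expected: "NonExistantClass" cannot be loaded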
+ }
+ try {
+ serverFactory = RpcFactoryProvider.getServerFactory(conf);
+ } catch (YarnException e) {
+ Assert.fail("Error while loading factory using reflection: [" + RpcServerFactoryPBImpl.class.getName() + "]");
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
new file mode 100644
index 0000000..8a61f6f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
@@ -0,0 +1,86 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.event;
+
+import java.util.HashMap;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
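+/**
+ * Test-only dispatcher that lets callers block until its event queue drains.
+ * A usage sketch (init/start come from the service lifecycle inherited via
+ * AsyncDispatcher):
+ *
+ *   DrainDispatcher dispatcher = new DrainDispatcher();
+ *   dispatcher.init(new Configuration());
+ *   dispatcher.start();
+ *   dispatcher.getEventHandler().handle(event);
+ *   dispatcher.await(); // returns once every queued event has been dispatched
+ */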
+@SuppressWarnings("rawtypes")
+public class DrainDispatcher extends AsyncDispatcher {
+
+// Note: fields may be observed before initialization completes; this is safe
+// because Java guarantees default field values, per
+// http://java.sun.com/docs/books/jls/third_edition/html/typesValues.html#96595
+ private volatile boolean drained = false;
+ private final BlockingQueue<Event> queue;
+
+ public DrainDispatcher() {
+ this(new LinkedBlockingQueue<Event>());
+ }
+
+ private DrainDispatcher(BlockingQueue<Event> eventQueue) {
+ super(new HashMap<Class<? extends Enum>, EventHandler>(), eventQueue);
+ this.queue = eventQueue;
+ }
+
+ /**
+ * Busy loop waiting for all queued events to drain.
+ */
+ public void await() {
+ while (!drained) {
+ Thread.yield();
+ }
+ }
+
+ @Override
+ Runnable createThread() {
+ return new Runnable() {
+ @Override
+ public void run() {
+ while (!Thread.currentThread().isInterrupted()) {
+ // !drained if dispatch queued new events on this dispatcher
+ drained = queue.isEmpty();
+ Event event;
+ try {
+ event = queue.take();
+ } catch(InterruptedException ie) {
+ return;
+ }
+ if (event != null) {
+ dispatch(event);
+ }
+ }
+ }
+ };
+ }
+
+ @Override
+ public EventHandler getEventHandler() {
+ final EventHandler actual = super.getEventHandler();
+ return new EventHandler() {
+ @Override
+ public void handle(Event event) {
+ drained = false;
+ actual.handle(event);
+ }
+ };
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java
new file mode 100644
index 0000000..ce9fedd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.Test;
+
+/**
+ * A JUnit test to test {@link LinuxResourceCalculatorPlugin}
+ * Create the fake /proc/ information and verify the parsing and calculation
+ */
+public class TestLinuxResourceCalculatorPlugin extends TestCase {
+ /**
+ * LinuxResourceCalculatorPlugin with a fake timer
+ */
+ static class FakeLinuxResourceCalculatorPlugin extends
+ LinuxResourceCalculatorPlugin {
+
+ long currentTime = 0;
+ public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
+ String procfsCpuFile,
+ String procfsStatFile,
+ long jiffyLengthInMillis) {
+ super(procfsMemFile, procfsCpuFile, procfsStatFile, jiffyLengthInMillis);
+ }
+ @Override
+ long getCurrentTime() {
+ return currentTime;
+ }
+ public void advanceTime(long adv) {
+ currentTime += adv * jiffyLengthInMillis;
+ }
+ }
+ private static final FakeLinuxResourceCalculatorPlugin plugin;
+ private static String TEST_ROOT_DIR = new Path(System.getProperty(
+ "test.build.data", "/tmp")).toString().replace(' ', '+');
+ private static final String FAKE_MEMFILE;
+ private static final String FAKE_CPUFILE;
+ private static final String FAKE_STATFILE;
+ private static final long FAKE_JIFFY_LENGTH = 10L;
+ static {
+ int randomNum = (new Random()).nextInt(1000000000);
+ FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
+ FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
+ FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
+ plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
+ FAKE_STATFILE,
+ FAKE_JIFFY_LENGTH);
+ }
+ static final String MEMINFO_FORMAT =
+ "MemTotal: %d kB\n" +
+ "MemFree: %d kB\n" +
+ "Buffers: 138244 kB\n" +
+ "Cached: 947780 kB\n" +
+ "SwapCached: 142880 kB\n" +
+ "Active: 3229888 kB\n" +
+ "Inactive: %d kB\n" +
+ "SwapTotal: %d kB\n" +
+ "SwapFree: %d kB\n" +
+ "Dirty: 122012 kB\n" +
+ "Writeback: 0 kB\n" +
+ "AnonPages: 2710792 kB\n" +
+ "Mapped: 24740 kB\n" +
+ "Slab: 132528 kB\n" +
+ "SReclaimable: 105096 kB\n" +
+ "SUnreclaim: 27432 kB\n" +
+ "PageTables: 11448 kB\n" +
+ "NFS_Unstable: 0 kB\n" +
+ "Bounce: 0 kB\n" +
+ "CommitLimit: 4125904 kB\n" +
+ "Committed_AS: 4143556 kB\n" +
+ "VmallocTotal: 34359738367 kB\n" +
+ "VmallocUsed: 1632 kB\n" +
+ "VmallocChunk: 34359736375 kB\n" +
+ "HugePages_Total: 0\n" +
+ "HugePages_Free: 0\n" +
+ "HugePages_Rsvd: 0\n" +
+ "Hugepagesize: 2048 kB";
+
+ static final String CPUINFO_FORMAT =
+ "processor : %s\n" +
+ "vendor_id : AuthenticAMD\n" +
+ "cpu family : 15\n" +
+ "model : 33\n" +
+ "model name : Dual Core AMD Opteron(tm) Processor 280\n" +
+ "stepping : 2\n" +
+ "cpu MHz : %f\n" +
+ "cache size : 1024 KB\n" +
+ "physical id : 0\n" +
+ "siblings : 2\n" +
+ "core id : 0\n" +
+ "cpu cores : 2\n" +
+ "fpu : yes\n" +
+ "fpu_exception : yes\n" +
+ "cpuid level : 1\n" +
+ "wp : yes\n" +
+ "flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov " +
+ "pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt lm " +
+ "3dnowext 3dnow pni lahf_lm cmp_legacy\n" +
+ "bogomips : 4792.41\n" +
+ "TLB size : 1024 4K pages\n" +
+ "clflush size : 64\n" +
+ "cache_alignment : 64\n" +
+ "address sizes : 40 bits physical, 48 bits virtual\n" +
+ "power management: ts fid vid ttp";
+
+ static final String STAT_FILE_FORMAT =
+ "cpu %d %d %d 1646495089 831319 48713 164346 0\n" +
+ "cpu0 15096055 30805 3823005 411456015 206027 13 14269 0\n" +
+ "cpu1 14760561 89890 6432036 408707910 456857 48074 130857 0\n" +
+ "cpu2 12761169 20842 3758639 413976772 98028 411 10288 0\n" +
+ "cpu3 12355207 47322 5789691 412354390 70406 213 8931 0\n" +
+ "intr 114648668 20010764 2 0 945665 2 0 0 0 0 0 0 0 4 0 0 0 0 0 0\n" +
+ "ctxt 242017731764\n" +
+ "btime 1257808753\n" +
+ "processes 26414943\n" +
+ "procs_running 1\n" +
+ "procs_blocked 0\n";
+
+ /**
+ * Test parsing /proc/stat and /proc/cpuinfo
+ * @throws IOException
+ */
+ @Test
+ public void testParsingProcStatAndCpuFile() throws IOException {
+ // Write fake /proc/cpuinfo file.
+ long numProcessors = 8;
+ long cpuFrequencyKHz = 2392781;
+ String fileContent = "";
+ for (int i = 0; i < numProcessors; i++) {
+ fileContent += String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D) +
+ "\n";
+ }
+ File tempFile = new File(FAKE_CPUFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
+ fWriter.write(fileContent);
+ fWriter.close();
+ assertEquals(plugin.getNumProcessors(), numProcessors);
+ assertEquals(plugin.getCpuFrequency(), cpuFrequencyKHz);
+
+ // Write fake /proc/stat file.
+ long uTime = 54972994;
+ long nTime = 188860;
+ long sTime = 19803373;
+ tempFile = new File(FAKE_STATFILE);
+ tempFile.deleteOnExit();
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), (float)(LinuxResourceCalculatorPlugin.UNAVAILABLE));
+
+ // Advance the time and sample again to test the CPU usage calculation
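+    // 100 CPU jiffies over 200 elapsed jiffies = 50%, averaged over 8 CPUs = 6.25%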
+ uTime += 100L;
+ plugin.advanceTime(200L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), 6.25F);
+
+ // Advance the time and sample again. This time, we call getCpuUsage() only.
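+    // 600 CPU jiffies over 300 elapsed jiffies = 200%, averaged over 8 CPUs = 25%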
+ uTime += 600L;
+ plugin.advanceTime(300L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCpuUsage(), 25F);
+
+ // Advance very short period of time (one jiffy length).
+ // In this case, CPU usage should not be updated.
+ uTime += 1L;
+ plugin.advanceTime(1L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), 25F); // CPU usage is not updated.
+ }
+
+ /**
+ * Write information to fake /proc/stat file
+ */
+ private void updateStatFile(long uTime, long nTime, long sTime)
+ throws IOException {
+ FileWriter fWriter = new FileWriter(FAKE_STATFILE);
+ fWriter.write(String.format(STAT_FILE_FORMAT, uTime, nTime, sTime));
+ fWriter.close();
+ }
+
+ /**
+ * Test parsing /proc/meminfo
+ * @throws IOException
+ */
+ @Test
+ public void testParsingProcMemFile() throws IOException {
+ long memTotal = 4058864L;
+ long memFree = 99632L;
+ long inactive = 567732L;
+ long swapTotal = 2096472L;
+ long swapFree = 1818480L;
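+    // Per the plugin's meminfo parsing (assumed): available physical memory
+    // is MemFree + Inactive; available virtual memory additionally adds SwapFree.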
+ File tempFile = new File(FAKE_MEMFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
+ fWriter.write(String.format(MEMINFO_FORMAT,
+ memTotal, memFree, inactive, swapTotal, swapFree));
+
+ fWriter.close();
+ assertEquals(plugin.getAvailablePhysicalMemorySize(),
+ 1024L * (memFree + inactive));
+ assertEquals(plugin.getAvailableVirtualMemorySize(),
+ 1024L * (memFree + inactive + swapFree));
+ assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
+ assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
new file mode 100644
index 0000000..454ef2c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -0,0 +1,761 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.Random;
+import java.util.Vector;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Shell.ExitCodeException;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * A JUnit test to test ProcfsBasedProcessTree.
+ */
+public class TestProcfsBasedProcessTree {
+
+ private static final Log LOG = LogFactory
+ .getLog(TestProcfsBasedProcessTree.class);
+ protected static File TEST_ROOT_DIR = new File("target",
+ TestProcfsBasedProcessTree.class.getName() + "-localDir");
+
+ private ShellCommandExecutor shexec = null;
+ private String pidFile, lowestDescendant;
+ private String shellScript;
+
+  private static final int N = 6; // recursion depth of the rogue task's shell script
+
+ private class RogueTaskThread extends Thread {
+ public void run() {
+ try {
+ Vector<String> args = new Vector<String>();
+ if(isSetsidAvailable()) {
+ args.add("setsid");
+ }
+ args.add("bash");
+ args.add("-c");
+ args.add(" echo $$ > " + pidFile + "; sh " +
+ shellScript + " " + N + ";") ;
+ shexec = new ShellCommandExecutor(args.toArray(new String[0]));
+ shexec.execute();
+ } catch (ExitCodeException ee) {
+ LOG.info("Shell Command exit with a non-zero exit code. This is" +
+ " expected as we are killing the subprocesses of the" +
+ " task intentionally. " + ee);
+ } catch (IOException ioe) {
+ LOG.info("Error executing shell command " + ioe);
+ } finally {
+ LOG.info("Exit code: " + shexec.getExitCode());
+ }
+ }
+ }
+
+ private String getRogueTaskPID() {
+ File f = new File(pidFile);
+ while (!f.exists()) {
+ try {
+ Thread.sleep(500);
+ } catch (InterruptedException ie) {
+ break;
+ }
+ }
+
+ // read from pidFile
+ return getPidFromPidFile(pidFile);
+ }
+
+ @Before
+ public void setup() throws IOException {
+ FileContext.getLocalFSFileContext().delete(
+ new Path(TEST_ROOT_DIR.getAbsolutePath()), true);
+ }
+
+ @Test
+ public void testProcessTree() throws Exception {
+
+ try {
+ if (!ProcfsBasedProcessTree.isAvailable()) {
+ System.out
+ .println("ProcfsBasedProcessTree is not available on this system. Not testing");
+ return;
+ }
+ } catch (Exception e) {
+ LOG.info(StringUtils.stringifyException(e));
+ return;
+ }
+ // create shell script
+ Random rm = new Random();
+ File tempFile =
+ new File(TEST_ROOT_DIR, getClass().getName() + "_shellScript_"
+ + rm.nextInt() + ".sh");
+ tempFile.deleteOnExit();
+ shellScript = TEST_ROOT_DIR + File.separator + tempFile.getName();
+
+ // create pid file
+ tempFile =
+ new File(TEST_ROOT_DIR, getClass().getName() + "_pidFile_"
+ + rm.nextInt() + ".pid");
+ tempFile.deleteOnExit();
+ pidFile = TEST_ROOT_DIR + File.separator + tempFile.getName();
+
+ lowestDescendant = TEST_ROOT_DIR + File.separator + "lowestDescendantPidFile";
+
+ // write to shell-script
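+    // The script calls itself recursively $1 times, building a chain of N+1
+    // sh processes; the deepest one records its pid in lowestDescendant and
+    // then loops forever until the tree is destroyed.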
+ try {
+ FileWriter fWriter = new FileWriter(shellScript);
+ fWriter.write(
+ "# rogue task\n" +
+ "sleep 1\n" +
+ "echo hello\n" +
+ "if [ $1 -ne 0 ]\n" +
+ "then\n" +
+ " sh " + shellScript + " $(($1-1))\n" +
+ "else\n" +
+ " echo $$ > " + lowestDescendant + "\n" +
+ " while true\n do\n" +
+ " sleep 5\n" +
+ " done\n" +
+ "fi");
+ fWriter.close();
+ } catch (IOException ioe) {
+ LOG.info("Error: " + ioe);
+ return;
+ }
+
+ Thread t = new RogueTaskThread();
+ t.start();
+ String pid = getRogueTaskPID();
+ LOG.info("Root process pid: " + pid);
+ ProcfsBasedProcessTree p = createProcessTree(pid);
+ p = p.getProcessTree(); // initialize
+ LOG.info("ProcessTree: " + p.toString());
+
+ File leaf = new File(lowestDescendant);
+    // wait till the lowest descendant process of the rogue task starts executing
+ while (!leaf.exists()) {
+ try {
+ Thread.sleep(500);
+ } catch (InterruptedException ie) {
+ break;
+ }
+ }
+
+ p = p.getProcessTree(); // reconstruct
+ LOG.info("ProcessTree: " + p.toString());
+
+ // Get the process-tree dump
+ String processTreeDump = p.getProcessTreeDump();
+
+ // destroy the process and all its subprocesses
+ destroyProcessTree(pid);
+
+ if (isSetsidAvailable()) { // whole processtree should be gone
+ Assert.assertFalse("Proceesses in process group live",
+ isAnyProcessInTreeAlive(p));
+    } else { // only the root process should be gone
+ Assert.assertFalse("ProcessTree must have been gone", isAlive(pid));
+ }
+
+ LOG.info("Process-tree dump follows: \n" + processTreeDump);
+ Assert.assertTrue("Process-tree dump doesn't start with a proper header",
+ processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " +
+ "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " +
+ "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
+ for (int i = N; i >= 0; i--) {
+ String cmdLineDump = "\\|- [0-9]+ [0-9]+ [0-9]+ [0-9]+ \\(sh\\)" +
+ " [0-9]+ [0-9]+ [0-9]+ [0-9]+ sh " + shellScript + " " + i;
+ Pattern pat = Pattern.compile(cmdLineDump);
+ Matcher mat = pat.matcher(processTreeDump);
+ Assert.assertTrue("Process-tree dump doesn't contain the cmdLineDump of " + i
+ + "th process!", mat.find());
+ }
+
+    // The thread sometimes cannot be joined when forking with a large N.
+ try {
+ t.join(2000);
+ LOG.info("RogueTaskThread successfully joined.");
+ } catch (InterruptedException ie) {
+ LOG.info("Interrupted while joining RogueTaskThread.");
+ }
+
+ // ProcessTree is gone now. Any further calls should be sane.
+ p = p.getProcessTree();
+ Assert.assertFalse("ProcessTree must have been gone", isAlive(pid));
+ Assert.assertTrue("Cumulative vmem for the gone-process is "
+ + p.getCumulativeVmem() + " . It should be zero.", p
+ .getCumulativeVmem() == 0);
+ Assert.assertTrue(p.toString().equals("[ ]"));
+ }
+
+ protected ProcfsBasedProcessTree createProcessTree(String pid) {
+ return new ProcfsBasedProcessTree(pid,
+ isSetsidAvailable());
+ }
+
+ protected ProcfsBasedProcessTree createProcessTree(String pid,
+ boolean setsidUsed, String procfsRootDir) {
+ return new ProcfsBasedProcessTree(pid, setsidUsed, procfsRootDir);
+ }
+
+ protected void destroyProcessTree(String pid) throws IOException {
+ sendSignal(pid, 9);
+ }
+
+ /**
+ * Get PID from a pid-file.
+ *
+ * @param pidFileName
+ * Name of the pid-file.
+   * @return the PID string read from the pid-file, or null if
+   *         pidFileName points to a non-existent file or the read from
+   *         the file fails.
+ */
+ public static String getPidFromPidFile(String pidFileName) {
+ BufferedReader pidFile = null;
+ FileReader fReader = null;
+ String pid = null;
+
+ try {
+ fReader = new FileReader(pidFileName);
+ pidFile = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ LOG.debug("PidFile doesn't exist : " + pidFileName);
+ return pid;
+ }
+
+ try {
+ pid = pidFile.readLine();
+ } catch (IOException i) {
+ LOG.error("Failed to read from " + pidFileName);
+    } finally {
+      try {
+        // closing the BufferedReader also closes the wrapped FileReader;
+        // pidFile cannot be null here as the method returns early on
+        // FileNotFoundException
+        pidFile.close();
+      } catch (IOException i) {
+        LOG.warn("Error closing the stream " + pidFile);
+      }
+    }
+ return pid;
+ }
+
+ public static class ProcessStatInfo {
+ // sample stat in a single line : 3910 (gpm) S 1 3910 3910 0 -1 4194624
+ // 83 0 0 0 0 0 0 0 16 0 1 0 7852 2408448 88 4294967295 134512640
+ // 134590050 3220521392 3220520036 10975138 0 0 4096 134234626
+ // 4294967295 0 0 17 1 0 0
+ String pid;
+ String name;
+ String ppid;
+ String pgrpId;
+ String session;
+ String vmem = "0";
+ String rssmemPage = "0";
+ String utime = "0";
+ String stime = "0";
+
+ public ProcessStatInfo(String[] statEntries) {
+ pid = statEntries[0];
+ name = statEntries[1];
+ ppid = statEntries[2];
+ pgrpId = statEntries[3];
+ session = statEntries[4];
+ vmem = statEntries[5];
+ if (statEntries.length > 6) {
+ rssmemPage = statEntries[6];
+ }
+ if (statEntries.length > 7) {
+ utime = statEntries[7];
+ stime = statEntries[8];
+ }
+ }
+
+ // construct a line that mimics the procfs stat file.
+ // all unused numerical entries are set to 0.
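+    // The field layout matches the real /proc/<pid>/stat: pid, comm and
+    // state come first, ppid/pgrp/session are fields 4-6, utime/stime are
+    // fields 14-15, and vsize/rss are fields 23-24.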
+ public String getStatLine() {
+ return String.format("%s (%s) S %s %s %s 0 0 0" +
+ " 0 0 0 0 %s %s 0 0 0 0 0 0 0 %s %s 0 0" +
+ " 0 0 0 0 0 0 0 0" +
+ " 0 0 0 0 0",
+ pid, name, ppid, pgrpId, session,
+ utime, stime, vmem, rssmemPage);
+ }
+ }
+
+ /**
+ * A basic test that creates a few process directories and writes
+   * stat files. Verifies that the cpu time and memory are correctly
+   * computed.
+ * @throws IOException if there was a problem setting up the
+ * fake procfs directories or files.
+ */
+ @Test
+ public void testCpuAndMemoryForProcessTree() throws IOException {
+
+ // test processes
+ String[] pids = { "100", "200", "300", "400" };
+ // create the fake procfs root directory.
+ File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
+
+ try {
+ setupProcfsRootDir(procfsRootDir);
+ setupPidDirs(procfsRootDir, pids);
+
+ // create stat objects.
+ // assuming processes 100, 200, 300 are in tree and 400 is not.
+ ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
+ procInfos[0] = new ProcessStatInfo(new String[]
+ {"100", "proc1", "1", "100", "100", "100000", "100", "1000", "200"});
+ procInfos[1] = new ProcessStatInfo(new String[]
+ {"200", "proc2", "100", "100", "100", "200000", "200", "2000", "400"});
+ procInfos[2] = new ProcessStatInfo(new String[]
+ {"300", "proc3", "200", "100", "100", "300000", "300", "3000", "600"});
+ procInfos[3] = new ProcessStatInfo(new String[]
+ {"400", "proc4", "1", "400", "400", "400000", "400", "4000", "800"});
+
+ writeStatFiles(procfsRootDir, pids, procInfos);
+
+ // crank up the process tree class.
+ ProcfsBasedProcessTree processTree =
+ createProcessTree("100", true, procfsRootDir.getAbsolutePath());
+ // build the process tree.
+ processTree.getProcessTree();
+
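+      // Expected values for the three in-tree processes (100, 200, 300):
+      // vmem = 100000 + 200000 + 300000 = 600000 bytes, rss = 100 + 200 +
+      // 300 = 600 pages, and cpu = (1000+200) + (2000+400) + (3000+600)
+      // = 7200 jiffies.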
+ // verify cumulative memory
+ Assert.assertEquals("Cumulative virtual memory does not match", 600000L,
+ processTree.getCumulativeVmem());
+
+ // verify rss memory
+ long cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+ 600L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+ Assert.assertEquals("Cumulative rss memory does not match",
+ cumuRssMem, processTree.getCumulativeRssmem());
+
+ // verify cumulative cpu time
+ long cumuCpuTime = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ?
+ 7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
+ Assert.assertEquals("Cumulative cpu time does not match",
+ cumuCpuTime, processTree.getCumulativeCpuTime());
+
+      // test the cpu time again to see if it accumulates
+ procInfos[0] = new ProcessStatInfo(new String[]
+ {"100", "proc1", "1", "100", "100", "100000", "100", "2000", "300"});
+ procInfos[1] = new ProcessStatInfo(new String[]
+ {"200", "proc2", "100", "100", "100", "200000", "200", "3000", "500"});
+ writeStatFiles(procfsRootDir, pids, procInfos);
+
+ // build the process tree.
+ processTree.getProcessTree();
+
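+      // Processes 100 and 200 now report (2000+300) and (3000+500) jiffies;
+      // process 300 is unchanged at (3000+600), giving 9400 in total.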
+ // verify cumulative cpu time again
+ cumuCpuTime = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ?
+ 9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
+ Assert.assertEquals("Cumulative cpu time does not match",
+ cumuCpuTime, processTree.getCumulativeCpuTime());
+ } finally {
+ FileUtil.fullyDelete(procfsRootDir);
+ }
+ }
+
+ /**
+ * Tests that cumulative memory is computed only for
+ * processes older than a given age.
+ * @throws IOException if there was a problem setting up the
+ * fake procfs directories or files.
+ */
+ @Test
+ public void testMemForOlderProcesses() throws IOException {
+ // initial list of processes
+ String[] pids = { "100", "200", "300", "400" };
+ // create the fake procfs root directory.
+ File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
+
+ try {
+ setupProcfsRootDir(procfsRootDir);
+ setupPidDirs(procfsRootDir, pids);
+
+ // create stat objects.
+ // assuming 100, 200 and 400 are in tree, 300 is not.
+ ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
+ procInfos[0] = new ProcessStatInfo(new String[]
+ {"100", "proc1", "1", "100", "100", "100000", "100"});
+ procInfos[1] = new ProcessStatInfo(new String[]
+ {"200", "proc2", "100", "100", "100", "200000", "200"});
+ procInfos[2] = new ProcessStatInfo(new String[]
+ {"300", "proc3", "1", "300", "300", "300000", "300"});
+ procInfos[3] = new ProcessStatInfo(new String[]
+ {"400", "proc4", "100", "100", "100", "400000", "400"});
+
+ writeStatFiles(procfsRootDir, pids, procInfos);
+
+ // crank up the process tree class.
+ ProcfsBasedProcessTree processTree =
+ createProcessTree("100", true, procfsRootDir.getAbsolutePath());
+ // build the process tree.
+ processTree.getProcessTree();
+
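+      // Processes 100, 200 and 400 form the tree (300 hangs off init), so
+      // the expected vmem is 100000 + 200000 + 400000 = 700000 bytes.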
+ // verify cumulative memory
+ Assert.assertEquals("Cumulative memory does not match",
+ 700000L, processTree.getCumulativeVmem());
+
+ // write one more process as child of 100.
+ String[] newPids = { "500" };
+ setupPidDirs(procfsRootDir, newPids);
+
+ ProcessStatInfo[] newProcInfos = new ProcessStatInfo[1];
+ newProcInfos[0] = new ProcessStatInfo(new String[]
+ {"500", "proc5", "100", "100", "100", "500000", "500"});
+ writeStatFiles(procfsRootDir, newPids, newProcInfos);
+
+ // check memory includes the new process.
+ processTree.getProcessTree();
+ Assert.assertEquals("Cumulative vmem does not include new process",
+ 1200000L, processTree.getCumulativeVmem());
+ long cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+ 1200L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+ Assert.assertEquals("Cumulative rssmem does not include new process",
+ cumuRssMem, processTree.getCumulativeRssmem());
+
+ // however processes older than 1 iteration will retain the older value
+ Assert.assertEquals("Cumulative vmem shouldn't have included new process",
+ 700000L, processTree.getCumulativeVmem(1));
+ cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+ 700L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+ Assert.assertEquals("Cumulative rssmem shouldn't have included new process",
+ cumuRssMem, processTree.getCumulativeRssmem(1));
+
+ // one more process
+ newPids = new String[]{ "600" };
+ setupPidDirs(procfsRootDir, newPids);
+
+ newProcInfos = new ProcessStatInfo[1];
+ newProcInfos[0] = new ProcessStatInfo(new String[]
+ {"600", "proc6", "100", "100", "100", "600000", "600"});
+ writeStatFiles(procfsRootDir, newPids, newProcInfos);
+
+ // refresh process tree
+ processTree.getProcessTree();
+
+ // processes older than 2 iterations should be same as before.
+ Assert.assertEquals("Cumulative vmem shouldn't have included new processes",
+ 700000L, processTree.getCumulativeVmem(2));
+ cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+ 700L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+ Assert.assertEquals("Cumulative rssmem shouldn't have included new processes",
+ cumuRssMem, processTree.getCumulativeRssmem(2));
+
+ // processes older than 1 iteration should not include new process,
+ // but include process 500
+ Assert.assertEquals("Cumulative vmem shouldn't have included new processes",
+ 1200000L, processTree.getCumulativeVmem(1));
+ cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+ 1200L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+ Assert.assertEquals("Cumulative rssmem shouldn't have included new processes",
+ cumuRssMem, processTree.getCumulativeRssmem(1));
+
+ // no processes older than 3 iterations, this should be 0
+ Assert.assertEquals("Getting non-zero vmem for processes older than 3 iterations",
+ 0L, processTree.getCumulativeVmem(3));
+ Assert.assertEquals("Getting non-zero rssmem for processes older than 3 iterations",
+ 0L, processTree.getCumulativeRssmem(3));
+ } finally {
+ FileUtil.fullyDelete(procfsRootDir);
+ }
+ }
+
+ /**
+   * Verifies ProcfsBasedProcessTree.checkPidPgrpidForMatch() in the case
+   * where constructProcessInfo() returns null, by not writing a stat file
+   * for the mock process.
+ * @throws IOException if there was a problem setting up the
+ * fake procfs directories or files.
+ */
+ @Test
+ public void testDestroyProcessTree() throws IOException {
+ // test process
+ String pid = "100";
+ // create the fake procfs root directory.
+ File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
+
+ try {
+ setupProcfsRootDir(procfsRootDir);
+
+ // crank up the process tree class.
+ ProcfsBasedProcessTree processTree =
+ createProcessTree(pid, true, procfsRootDir.getAbsolutePath());
+
+      // Deliberately do not create a stat file for pid 100.
+ Assert.assertTrue(ProcfsBasedProcessTree.checkPidPgrpidForMatch(
+ Integer.valueOf(pid), procfsRootDir.getAbsolutePath()));
+ } finally {
+ FileUtil.fullyDelete(procfsRootDir);
+ }
+ }
+
+ /**
+ * Test the correctness of process-tree dump.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testProcessTreeDump()
+ throws IOException {
+
+ String[] pids = { "100", "200", "300", "400", "500", "600" };
+
+ File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
+
+ try {
+ setupProcfsRootDir(procfsRootDir);
+ setupPidDirs(procfsRootDir, pids);
+
+ int numProcesses = pids.length;
+ // Processes 200, 300, 400 and 500 are descendants of 100. 600 is not.
+ ProcessStatInfo[] procInfos = new ProcessStatInfo[numProcesses];
+ procInfos[0] = new ProcessStatInfo(new String[] {
+ "100", "proc1", "1", "100", "100", "100000", "100", "1000", "200"});
+ procInfos[1] = new ProcessStatInfo(new String[] {
+ "200", "proc2", "100", "100", "100", "200000", "200", "2000", "400"});
+ procInfos[2] = new ProcessStatInfo(new String[] {
+ "300", "proc3", "200", "100", "100", "300000", "300", "3000", "600"});
+ procInfos[3] = new ProcessStatInfo(new String[] {
+ "400", "proc4", "200", "100", "100", "400000", "400", "4000", "800"});
+ procInfos[4] = new ProcessStatInfo(new String[] {
+ "500", "proc5", "400", "100", "100", "400000", "400", "4000", "800"});
+ procInfos[5] = new ProcessStatInfo(new String[] {
+ "600", "proc6", "1", "1", "1", "400000", "400", "4000", "800"});
+
+ String[] cmdLines = new String[numProcesses];
+ cmdLines[0] = "proc1 arg1 arg2";
+ cmdLines[1] = "proc2 arg3 arg4";
+ cmdLines[2] = "proc3 arg5 arg6";
+ cmdLines[3] = "proc4 arg7 arg8";
+ cmdLines[4] = "proc5 arg9 arg10";
+ cmdLines[5] = "proc6 arg11 arg12";
+
+ writeStatFiles(procfsRootDir, pids, procInfos);
+ writeCmdLineFiles(procfsRootDir, pids, cmdLines);
+
+ ProcfsBasedProcessTree processTree = createProcessTree(
+ "100", true, procfsRootDir.getAbsolutePath());
+ // build the process tree.
+ processTree.getProcessTree();
+
+ // Get the process-tree dump
+ String processTreeDump = processTree.getProcessTreeDump();
+
+ LOG.info("Process-tree dump follows: \n" + processTreeDump);
+ Assert.assertTrue("Process-tree dump doesn't start with a proper header",
+ processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " +
+ "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " +
+ "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
+ for (int i = 0; i < 5; i++) {
+ ProcessStatInfo p = procInfos[i];
+ Assert.assertTrue(
+ "Process-tree dump doesn't contain the cmdLineDump of process "
+ + p.pid, processTreeDump.contains("\t|- " + p.pid + " "
+ + p.ppid + " " + p.pgrpId + " " + p.session + " (" + p.name
+ + ") " + p.utime + " " + p.stime + " " + p.vmem + " "
+ + p.rssmemPage + " " + cmdLines[i]));
+ }
+
+ // 600 should not be in the dump
+ ProcessStatInfo p = procInfos[5];
+ Assert.assertFalse(
+ "Process-tree dump shouldn't contain the cmdLineDump of process "
+ + p.pid, processTreeDump.contains("\t|- " + p.pid + " " + p.ppid
+ + " " + p.pgrpId + " " + p.session + " (" + p.name + ") "
+ + p.utime + " " + p.stime + " " + p.vmem + " " + cmdLines[5]));
+ } finally {
+ FileUtil.fullyDelete(procfsRootDir);
+ }
+ }
+
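+  /**
+   * Probe for setsid by running it. When available, the rogue task is
+   * started in its own session, so the whole process tree can be signalled
+   * through its (negative) process-group id; see isAlive().
+   */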
+ protected static boolean isSetsidAvailable() {
+ ShellCommandExecutor shexec = null;
+ boolean setsidSupported = true;
+ try {
+ String[] args = {"setsid", "bash", "-c", "echo $$"};
+ shexec = new ShellCommandExecutor(args);
+ shexec.execute();
+ } catch (IOException ioe) {
+ LOG.warn("setsid is not available on this machine. So not using it.");
+ setsidSupported = false;
+ } finally { // handle the exit code
+ LOG.info("setsid exited with exit code " + shexec.getExitCode());
+ }
+ return setsidSupported;
+ }
+
+ /**
+ * Is the root-process alive?
+ * Used only in tests.
+ * @return true if the root-process is alive, false otherwise.
+ */
+ private static boolean isAlive(String pid) {
+ try {
+ final String sigpid = isSetsidAvailable() ? "-" + pid : pid;
+ try {
+ sendSignal(sigpid, 0);
+ } catch (ExitCodeException e) {
+ return false;
+ }
+ return true;
+ } catch (IOException ignored) {
+ }
+ return false;
+ }
+
+ private static void sendSignal(String pid, int signal) throws IOException {
+ ShellCommandExecutor shexec = null;
+ String[] arg = { "kill", "-" + signal, pid };
+ shexec = new ShellCommandExecutor(arg);
+ shexec.execute();
+ }
+
+ /**
+ * Is any of the subprocesses in the process-tree alive?
+ * Used only in tests.
+ * @return true if any of the processes in the process-tree is
+ * alive, false otherwise.
+ */
+ private static boolean isAnyProcessInTreeAlive(
+ ProcfsBasedProcessTree processTree) {
+ for (Integer pId : processTree.getCurrentProcessIDs()) {
+ if (isAlive(pId.toString())) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Create a directory to mimic the procfs file system's root.
+ * @param procfsRootDir root directory to create.
+   * @throws IOException if the procfs root directory could not be deleted
+ */
+ public static void setupProcfsRootDir(File procfsRootDir)
+ throws IOException {
+ // cleanup any existing process root dir.
+ if (procfsRootDir.exists()) {
+ Assert.assertTrue(FileUtil.fullyDelete(procfsRootDir));
+ }
+
+ // create afresh
+ Assert.assertTrue(procfsRootDir.mkdirs());
+ }
+
+ /**
+ * Create PID directories under the specified procfs root directory
+ * @param procfsRootDir root directory of procfs file system
+ * @param pids the PID directories to create.
+ * @throws IOException If PID dirs could not be created
+ */
+ public static void setupPidDirs(File procfsRootDir, String[] pids)
+ throws IOException {
+ for (String pid : pids) {
+ File pidDir = new File(procfsRootDir, pid);
+ pidDir.mkdir();
+ if (!pidDir.exists()) {
+        throw new IOException("couldn't make process directory under " +
+            "fake procfs");
+      } else {
+        LOG.info("created pid dir " + pidDir);
+ }
+ }
+ }
+
+ /**
+ * Write stat files under the specified pid directories with data
+ * setup in the corresponding ProcessStatInfo objects
+ * @param procfsRootDir root directory of procfs file system
+ * @param pids the PID directories under which to create the stat file
+ * @param procs corresponding ProcessStatInfo objects whose data should be
+ * written to the stat files.
+ * @throws IOException if stat files could not be written
+ */
+ public static void writeStatFiles(File procfsRootDir, String[] pids,
+ ProcessStatInfo[] procs) throws IOException {
+ for (int i=0; i<pids.length; i++) {
+ File statFile =
+ new File(new File(procfsRootDir, pids[i]),
+ ProcfsBasedProcessTree.PROCFS_STAT_FILE);
+ BufferedWriter bw = null;
+ try {
+ FileWriter fw = new FileWriter(statFile);
+ bw = new BufferedWriter(fw);
+ bw.write(procs[i].getStatLine());
+ LOG.info("wrote stat file for " + pids[i] +
+ " with contents: " + procs[i].getStatLine());
+ } finally {
+ // not handling exception - will throw an error and fail the test.
+ if (bw != null) {
+ bw.close();
+ }
+ }
+ }
+ }
+
+ private static void writeCmdLineFiles(File procfsRootDir, String[] pids,
+ String[] cmdLines)
+ throws IOException {
+ for (int i = 0; i < pids.length; i++) {
+ File statFile =
+ new File(new File(procfsRootDir, pids[i]),
+ ProcfsBasedProcessTree.PROCFS_CMDLINE_FILE);
+ BufferedWriter bw = null;
+ try {
+ bw = new BufferedWriter(new FileWriter(statFile));
+ bw.write(cmdLines[i]);
+ LOG.info("wrote command-line file for " + pids[i] + " with contents: "
+ + cmdLines[i]);
+ } finally {
+ // not handling exception - will throw an error and fail the test.
+ if (bw != null) {
+ bw.close();
+ }
+ }
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestYarnVersionInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestYarnVersionInfo.java
new file mode 100644
index 0000000..73d7ff6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestYarnVersionInfo.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import junit.framework.TestCase;
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
+import org.junit.Test;
+
+/**
+ * A JUnit test to test {@link YarnVersionInfo}
+ */
+public class TestYarnVersionInfo extends TestCase {
+
+ /**
+ * Test the yarn version info routines.
+ * @throws IOException
+ */
+ @Test
+ public void testVersionInfoGenerated() throws IOException {
+
+    // can't easily know what the correct values are going to be, so just
+    // make sure they aren't "Unknown"
+    assertTrue("getVersion returned Unknown",
+        !YarnVersionInfo.getVersion().equals("Unknown"));
+    assertTrue("getUser returned Unknown",
+        !YarnVersionInfo.getUser().equals("Unknown"));
+    assertTrue("getUrl returned Unknown",
+        !YarnVersionInfo.getUrl().equals("Unknown"));
+    assertTrue("getSrcChecksum returned Unknown",
+        !YarnVersionInfo.getSrcChecksum().equals("Unknown"));
+
+    // these could be Unknown if the VersionInfo was generated from code
+    // that is not under svn or git, so just check that they return something
+ assertNotNull("getRevision returned null", YarnVersionInfo.getRevision());
+ assertNotNull("getBranch returned null", YarnVersionInfo.getBranch());
+
+ assertTrue("getBuildVersion check doesn't contain: source checksum",
+ YarnVersionInfo.getBuildVersion().contains("source checksum"));
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestParseRoute.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestParseRoute.java
new file mode 100644
index 0000000..87d62cc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestParseRoute.java
@@ -0,0 +1,83 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.WebAppException;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestParseRoute {
+
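+  // As the assertions in these tests indicate, parseRoute is expected to
+  // return a list of the form [route prefix, controller name, action name,
+  // captured parameters...].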
+ @Test public void testNormalAction() {
+ assertEquals(Arrays.asList("/foo/action", "foo", "action", ":a1", ":a2"),
+ WebApp.parseRoute("/foo/action/:a1/:a2"));
+ }
+
+ @Test public void testDefaultController() {
+ assertEquals(Arrays.asList("/", "default", "index"),
+ WebApp.parseRoute("/"));
+ }
+
+ @Test public void testDefaultAction() {
+ assertEquals(Arrays.asList("/foo", "foo", "index"),
+ WebApp.parseRoute("/foo"));
+ assertEquals(Arrays.asList("/foo", "foo", "index"),
+ WebApp.parseRoute("/foo/"));
+ }
+
+ @Test public void testMissingAction() {
+ assertEquals(Arrays.asList("/foo", "foo", "index", ":a1"),
+ WebApp.parseRoute("/foo/:a1"));
+ }
+
+ @Test public void testDefaultCapture() {
+ assertEquals(Arrays.asList("/", "default", "index", ":a"),
+ WebApp.parseRoute("/:a"));
+ }
+
+ @Test public void testPartialCapture1() {
+ assertEquals(Arrays.asList("/foo/action/bar", "foo", "action", "bar", ":a"),
+ WebApp.parseRoute("/foo/action/bar/:a"));
+ }
+
+ @Test public void testPartialCapture2() {
+ assertEquals(Arrays.asList("/foo/action", "foo", "action", ":a1", "bar",
+ ":a2", ":a3"),
+ WebApp.parseRoute("/foo/action/:a1/bar/:a2/:a3"));
+ }
+
+ @Test public void testLeadingPaddings() {
+ assertEquals(Arrays.asList("/foo/action", "foo", "action", ":a"),
+ WebApp.parseRoute(" /foo/action/ :a"));
+ }
+
+ @Test public void testTrailingPaddings() {
+ assertEquals(Arrays.asList("/foo/action", "foo", "action", ":a"),
+ WebApp.parseRoute("/foo/action//:a / "));
+ assertEquals(Arrays.asList("/foo/action", "foo", "action"),
+ WebApp.parseRoute("/foo/action / "));
+ }
+
+ @Test(expected=WebAppException.class) public void testMissingLeadingSlash() {
+ WebApp.parseRoute("foo/bar");
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java
new file mode 100644
index 0000000..29ac6b4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java
@@ -0,0 +1,75 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.webapp.view.HtmlPage;
+import java.io.PrintWriter;
+import javax.servlet.http.HttpServletResponse;
+import com.google.inject.Injector;
+
+import org.junit.Test;
+import static org.mockito.Mockito.*;
+
+public class TestSubViews {
+
+ static public class MainView extends HtmlPage {
+ @Override
+ public void render(Page.HTML<_> html) {
+ html.
+ body().
+ div().
+ _(Sub1.class)._().
+ div().
+ i("inline text").
+ _(Sub2.class)._()._()._();
+ }
+ }
+
+ static public class Sub1 extends HtmlBlock {
+ @Override
+ public void render(Block html) {
+ html.
+ div("#sub1").
+ _("sub1 text")._();
+ }
+ }
+
+ static public class Sub2 extends HtmlBlock {
+ @Override
+ public void render(Block html) {
+ html.
+ pre().
+ _("sub2 text")._();
+ }
+ }
+
+ @Test public void testSubView() throws Exception {
+ Injector injector = WebAppTests.createMockInjector(this);
+ injector.getInstance(MainView.class).render();
+
+ PrintWriter out =
+ injector.getInstance(HttpServletResponse.class).getWriter();
+ out.flush();
+ verify(out).print("sub1 text");
+ verify(out).print("sub2 text");
+ verify(out, times(15)).println(); // test inline transition across views
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
new file mode 100644
index 0000000..db84f32
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
@@ -0,0 +1,231 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp;
+
+import org.apache.hadoop.yarn.MockApps;
+import org.apache.hadoop.yarn.webapp.Controller;
+import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.view.HtmlPage;
+import org.apache.hadoop.yarn.webapp.view.JQueryUI;
+import org.apache.hadoop.yarn.webapp.view.TextPage;
+
+import com.google.inject.Inject;
+
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestWebApp {
+ static final Logger LOG = LoggerFactory.getLogger(TestWebApp.class);
+
+ static class FooController extends Controller {
+ final TestWebApp test;
+
+ @Inject FooController(TestWebApp test) {
+ this.test = test;
+ }
+
+ @Override public void index() {
+ set("key", test.echo("foo"));
+ }
+
+ public void bar() {
+ set("key", "bar");
+ }
+
+ public void names() {
+ for (int i = 0; i < 20; ++i) {
+ renderText(MockApps.newAppName() + "\n");
+ }
+ }
+
+ public void ex() {
+ boolean err = $("clear").isEmpty();
+ renderText(err ? "Should redirect to an error page." : "No error!");
+ if (err) {
+ throw new RuntimeException("exception test");
+ }
+ }
+
+ public void tables() {
+ render(TablesView.class);
+ }
+ }
+
+ static class FooView extends TextPage {
+ @Override public void render() {
+ puts($("key"), $("foo"));
+ }
+ }
+
+ static class DefaultController extends Controller {
+ @Override public void index() {
+ set("key", "default");
+ render(FooView.class);
+ }
+ }
+
+ static class TablesView extends HtmlPage {
+ @Override
+ public void render(Page.HTML<_> html) {
+ set(DATATABLES_ID, "t1 t2 t3 t4");
+ set(initID(DATATABLES, "t1"), tableInit().append("}").toString());
+ set(initID(DATATABLES, "t2"), join("{bJQueryUI:true, sDom:'t',",
+ "aoColumns:[null, {bSortable:false, bSearchable:false}]}"));
+ set(initID(DATATABLES, "t3"), "{bJQueryUI:true, sDom:'t'}");
+ set(initID(DATATABLES, "t4"), "{bJQueryUI:true, sDom:'t'}");
+ html.
+ title("Test DataTables").
+ link("/static/yarn.css").
+ _(JQueryUI.class).
+ style(".wrapper { padding: 1em }",
+ ".wrapper h2 { margin: 0.5em 0 }",
+ ".dataTables_wrapper { min-height: 1em }").
+ div(".wrapper").
+ h2("Default table init").
+ table("#t1").
+ thead().
+ tr().th("Column1").th("Column2")._()._().
+ tbody().
+ tr().td("c1r1").td("c2r1")._().
+ tr().td("c1r2").td("c2r2")._()._()._().
+ h2("Nested tables").
+ div(_INFO_WRAP).
+ table("#t2").
+ thead().
+ tr().th(_TH, "Column1").th(_TH, "Column2")._()._().
+ tbody().
+ tr().td("r1"). // th wouldn't work as of dt 1.7.5
+ td().$class(C_TABLE).
+ table("#t3").
+ thead().
+ tr().th("SubColumn1").th("SubColumn2")._()._().
+ tbody().
+ tr().td("subc1r1").td("subc2r1")._().
+ tr().td("subc1r2").td("subc2r2")._()._()._()._()._().
+ tr().td("r2"). // ditto
+ td().$class(C_TABLE).
+ table("#t4").
+ thead().
+ tr().th("SubColumn1").th("SubColumn2")._()._().
+ tbody().
+ tr().td("subc1r1").td("subc2r1")._().
+ tr().td("subc1r2").td("subc2r2")._().
+ _()._()._()._()._()._()._()._()._();
+ }
+ }
+
+ String echo(String s) { return s; }
+
+ @Test public void testCreate() {
+ WebApp app = WebApps.$for(this).start();
+ app.stop();
+ }
+
+ @Test public void testDefaultRoutes() throws Exception {
+ WebApp app = WebApps.$for("test", this).start();
+ String baseUrl = baseUrl(app);
+ try {
+ assertEquals("foo", getContent(baseUrl +"test/foo").trim());
+ assertEquals("foo", getContent(baseUrl +"test/foo/index").trim());
+ assertEquals("bar", getContent(baseUrl +"test/foo/bar").trim());
+ assertEquals("default", getContent(baseUrl +"test").trim());
+ assertEquals("default", getContent(baseUrl +"test/").trim());
+ assertEquals("default", getContent(baseUrl).trim());
+ } finally {
+ app.stop();
+ }
+ }
+
+ @Test public void testCustomRoutes() throws Exception {
+ WebApp app = WebApps.$for("test", this).start(new WebApp() {
+ @Override public void setup() {
+ route("/:foo", FooController.class);
+ route("/bar/foo", FooController.class, "bar");
+ route("/foo/:foo", DefaultController.class);
+ route("/foo/bar/:foo", DefaultController.class, "index");
+ }
+ });
+ String baseUrl = baseUrl(app);
+ try {
+ assertEquals("foo", getContent(baseUrl).trim());
+ assertEquals("foo", getContent(baseUrl +"test").trim());
+ assertEquals("foo1", getContent(baseUrl +"test/1").trim());
+ assertEquals("bar", getContent(baseUrl +"test/bar/foo").trim());
+ assertEquals("default", getContent(baseUrl +"test/foo/bar").trim());
+ assertEquals("default1", getContent(baseUrl +"test/foo/1").trim());
+ assertEquals("default2", getContent(baseUrl +"test/foo/bar/2").trim());
+ assertEquals(404, getResponseCode(baseUrl +"test/goo"));
+ } finally {
+ app.stop();
+ }
+ }
+
+ static String baseUrl(WebApp app) {
+ return "http://localhost:"+ app.port() +"/";
+ }
+
+ static String getContent(String url) {
+ try {
+ StringBuilder out = new StringBuilder();
+ InputStream in = new URL(url).openConnection().getInputStream();
+ byte[] buffer = new byte[64 * 1024];
+ int len = in.read(buffer);
+ while (len > 0) {
+ out.append(new String(buffer, 0, len));
+ len = in.read(buffer);
+ }
+ return out.toString();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ static int getResponseCode(String url) {
+ try {
+ HttpURLConnection c = (HttpURLConnection)new URL(url).openConnection();
+ return c.getResponseCode();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ // For manual controller/view testing.
+ WebApps.$for("test", new TestWebApp()).at(8888).inDevMode().start().
+ joinThread();
+// start(new WebApp() {
+// @Override public void setup() {
+// route("/:foo", FooController.class);
+// route("/foo/:foo", FooController.class);
+// route("/bar", FooController.class);
+// }
+// }).join();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java
new file mode 100644
index 0000000..1ea8bcf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java
@@ -0,0 +1,166 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.hamlet;
+
+import java.util.EnumSet;
+import java.io.PrintWriter;
+import org.junit.Test;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+
+import static org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.*;
+
+public class TestHamlet {
+
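+  // These tests render hamlet markup into a Mockito-spied PrintWriter (see
+  // newHamlet() below) and verify individual print() calls, so they assert
+  // on the raw token stream rather than on a final markup string.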
+ @Test public void testHamlet() {
+ Hamlet h = newHamlet().
+ title("test").
+ h1("heading 1").
+ p("#id.class").
+ b("hello").
+ em("world!")._().
+ div("#footer").
+ _("Brought to you by").
+ a("http://hostname/", "Somebody")._();
+
+ PrintWriter out = h.getWriter();
+ out.flush();
+ assertEquals(0, h.nestLevel);
+ verify(out).print("<title");
+ verify(out).print("test");
+ verify(out).print("</title>");
+ verify(out).print("<h1");
+ verify(out).print("heading 1");
+ verify(out).print("</h1>");
+ verify(out).print("<p");
+ verify(out).print(" id=\"id\"");
+ verify(out).print(" class=\"class\"");
+ verify(out).print("<b");
+ verify(out).print("hello");
+ verify(out).print("</b>");
+ verify(out).print("<em");
+ verify(out).print("world!");
+ verify(out).print("</em>");
+ verify(out).print("<div");
+ verify(out).print(" id=\"footer\"");
+ verify(out).print("Brought to you by");
+ verify(out).print("<a");
+ verify(out).print(" href=\"http://hostname/\"");
+ verify(out).print("Somebody");
+ verify(out).print("</a>");
+ verify(out).print("</div>");
+ verify(out, never()).print("</p>");
+ }
+
+ @Test public void testTable() {
+ Hamlet h = newHamlet().
+ title("test table").
+ link("style.css");
+
+ TABLE t = h.table("#id");
+
+ for (int i = 0; i < 3; ++i) {
+ t.tr().td("1").td("2")._();
+ }
+ t._();
+
+ PrintWriter out = h.getWriter();
+ out.flush();
+ assertEquals(0, h.nestLevel);
+ verify(out).print("<table");
+ verify(out).print("</table>");
+ verify(out, never()).print("</td>");
+ verify(out, never()).print("</tr>");
+ }
+
+ @Test public void testEnumAttrs() {
+ Hamlet h = newHamlet().
+ meta_http("Content-type", "text/html; charset=utf-8").
+ title("test enum attrs").
+ link().$rel("stylesheet").
+ $media(EnumSet.of(Media.screen, Media.print)).
+ $type("text/css").$href("style.css")._().
+ link().$rel(EnumSet.of(LinkType.index, LinkType.start)).
+ $href("index.html")._();
+
+ h.div("#content")._("content")._();
+
+ PrintWriter out = h.getWriter();
+ out.flush();
+ assertEquals(0, h.nestLevel);
+ verify(out).print(" media=\"screen, print\"");
+ verify(out).print(" rel=\"start index\"");
+ }
+
+ @Test public void testScriptStyle() {
+ Hamlet h = newHamlet().
+ script("a.js").script("b.js").
+ style("h1 { font-size: 1.2em }");
+
+ PrintWriter out = h.getWriter();
+ out.flush();
+ assertEquals(0, h.nestLevel);
+ verify(out, times(2)).print(" type=\"text/javascript\"");
+ verify(out).print(" type=\"text/css\"");
+ }
+
+ @Test public void testPreformatted() {
+ Hamlet h = newHamlet().
+ div().
+ i("inline before pre").
+ pre().
+ _("pre text1\npre text2").
+ i("inline in pre").
+ _("pre text after inline")._().
+ i("inline after pre")._();
+
+ PrintWriter out = h.getWriter();
+ out.flush();
+ assertEquals(5, h.indents);
+ }
+
+ static class TestView1 implements SubView {
+ @Override public void renderPartial() {}
+ }
+
+ static class TestView2 implements SubView {
+ @Override public void renderPartial() {}
+ }
+
+ @Test public void testSubViews() {
+ Hamlet h = newHamlet().
+ title("test sub-views").
+ div("#view1")._(TestView1.class)._().
+ div("#view2")._(TestView2.class)._();
+
+ PrintWriter out = h.getWriter();
+ out.flush();
+ assertEquals(0, h.nestLevel);
+ verify(out).print("["+ TestView1.class.getName() +"]");
+ verify(out).print("["+ TestView2.class.getName() +"]");
+ }
+
+ static Hamlet newHamlet() {
+ PrintWriter out = spy(new PrintWriter(System.out));
+ return new Hamlet(out, 0, false);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamletImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamletImpl.java
new file mode 100644
index 0000000..9a1efcf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamletImpl.java
@@ -0,0 +1,108 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.hamlet;
+
+import java.io.PrintWriter;
+import org.junit.Test;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+import org.apache.hadoop.yarn.webapp.hamlet.HamletImpl;
+import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.*;
+
+public class TestHamletImpl {
+ /**
+ * Test the generic implementation methods
+ * @see TestHamlet for Hamlet syntax
+ */
+ @Test public void testGeneric() {
+ PrintWriter out = spy(new PrintWriter(System.out));
+ HamletImpl hi = new HamletImpl(out, 0, false);
+ hi.
+ root("start")._attr("name", "value").
+ _("start text").
+ elem("sub")._attr("name", "value").
+ _("sub text")._().
+ elem("sub1")._noEndTag()._attr("boolean", null).
+ _("sub1text")._().
+ _("start text2").
+ elem("pre")._pre().
+ _("pre text").
+ elem("i")._inline()._("inline")._()._().
+ elem("i")._inline()._("inline after pre")._().
+ _("start text3").
+ elem("sub2").
+ _("sub2text")._().
+ elem("sub3")._noEndTag().
+ _("sub3text")._().
+ elem("sub4")._noEndTag().
+ elem("i")._inline()._("inline")._().
+ _("sub4text")._()._();
+
+ out.flush();
+ assertEquals(0, hi.nestLevel);
+ assertEquals(20, hi.indents);
+ verify(out).print("<start");
+ verify(out, times(2)).print(" name=\"value\"");
+ verify(out).print(" boolean");
+ verify(out).print("</start>");
+ verify(out, never()).print("</sub1>");
+ verify(out, never()).print("</sub3>");
+ verify(out, never()).print("</sub4>");
+ }
+
+ @Test public void testSetSelector() {
+ CoreAttrs e = mock(CoreAttrs.class);
+ HamletImpl.setSelector(e, "#id.class");
+
+ verify(e).$id("id");
+ verify(e).$class("class");
+
+ H1 t = mock(H1.class);
+ HamletImpl.setSelector(t, "#id.class")._("heading");
+
+ verify(t).$id("id");
+ verify(t).$class("class");
+ verify(t)._("heading");
+ }
+
+ @Test public void testSetLinkHref() {
+ LINK link = mock(LINK.class);
+ HamletImpl.setLinkHref(link, "uri");
+ HamletImpl.setLinkHref(link, "style.css");
+
+ verify(link).$href("uri");
+ verify(link).$rel("stylesheet");
+ verify(link).$href("style.css");
+
+ verifyNoMoreInteractions(link);
+ }
+
+ @Test public void testSetScriptSrc() {
+ SCRIPT script = mock(SCRIPT.class);
+ HamletImpl.setScriptSrc(script, "uri");
+ HamletImpl.setScriptSrc(script, "script.js");
+
+ verify(script).$src("uri");
+ verify(script).$type("text/javascript");
+ verify(script).$src("script.js");
+
+ verifyNoMoreInteractions(script);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestParseSelector.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestParseSelector.java
new file mode 100644
index 0000000..3b6de66
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestParseSelector.java
@@ -0,0 +1,57 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.hamlet;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.yarn.webapp.WebAppException;
+
+import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.*;
+
+public class TestParseSelector {
+
+ @Test public void testNormal() {
+ String[] res = parseSelector("#id.class");
+ assertEquals("id", res[S_ID]);
+ assertEquals("class", res[S_CLASS]);
+ }
+
+ @Test public void testMultiClass() {
+ String[] res = parseSelector("#id.class1.class2");
+ assertEquals("id", res[S_ID]);
+ assertEquals("class1 class2", res[S_CLASS]);
+ }
+
+ @Test public void testMissingId() {
+ String[] res = parseSelector(".class");
+ assertNull(res[S_ID]);
+ assertEquals("class", res[S_CLASS]);
+ }
+
+ @Test public void testMissingClass() {
+ String[] res = parseSelector("#id");
+ assertEquals("id", res[S_ID]);
+ assertNull(res[S_CLASS]);
+ }
+
+ @Test(expected=WebAppException.class) public void testMissingAll() {
+ parseSelector("");
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/test/TestWebAppTests.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/test/TestWebAppTests.java
new file mode 100644
index 0000000..e2f2bfa
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/test/TestWebAppTests.java
@@ -0,0 +1,103 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.test;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.Injector;
+import com.google.inject.servlet.RequestScoped;
+import java.io.PrintWriter;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.junit.Test;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
+
+public class TestWebAppTests {
+ static final Logger LOG = LoggerFactory.getLogger(TestWebAppTests.class);
+
+ @Test public void testInstances() throws Exception {
+ Injector injector = WebAppTests.createMockInjector(this);
+ HttpServletRequest req = injector.getInstance(HttpServletRequest.class);
+ HttpServletResponse res = injector.getInstance(HttpServletResponse.class);
+ String val = req.getParameter("foo");
+ PrintWriter out = res.getWriter();
+ out.println("Hello world!");
+ logInstances(req, res, out);
+
+ assertSame(req, injector.getInstance(HttpServletRequest.class));
+ assertSame(res, injector.getInstance(HttpServletResponse.class));
+ assertSame(this, injector.getInstance(TestWebAppTests.class));
+
+ verify(req).getParameter("foo");
+ verify(res).getWriter();
+ verify(out).println("Hello world!");
+ }
+
+ interface Foo {
+ }
+
+ static class Bar implements Foo {
+ }
+
+ static class FooBar extends Bar {
+ }
+
+ @Test public void testCreateInjector() throws Exception {
+ Bar bar = new Bar();
+ Injector injector = WebAppTests.createMockInjector(Foo.class, bar);
+ logInstances(injector.getInstance(HttpServletRequest.class),
+ injector.getInstance(HttpServletResponse.class),
+ injector.getInstance(HttpServletResponse.class).getWriter());
+ assertSame(bar, injector.getInstance(Foo.class));
+ }
+
+ @Test public void testCreateInjector2() {
+ final FooBar foobar = new FooBar();
+ Bar bar = new Bar();
+ Injector injector = WebAppTests.createMockInjector(Foo.class, bar,
+ new AbstractModule() {
+ @Override protected void configure() {
+ bind(Bar.class).toInstance(foobar);
+ }
+ });
+ assertNotSame(bar, injector.getInstance(Bar.class));
+ assertSame(foobar, injector.getInstance(Bar.class));
+ }
+
+ @RequestScoped
+ static class ScopeTest {
+ }
+
+ @Test public void testRequestScope() {
+ Injector injector = WebAppTests.createMockInjector(this);
+
+ assertSame(injector.getInstance(ScopeTest.class),
+ injector.getInstance(ScopeTest.class));
+ }
+
+ private void logInstances(HttpServletRequest req, HttpServletResponse res,
+ PrintWriter out) {
+ LOG.info("request: {}", req);
+ LOG.info("response: {}", res);
+ LOG.info("writer: {}", out);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/test/WebAppTests.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/test/WebAppTests.java
new file mode 100644
index 0000000..e94fd5a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/test/WebAppTests.java
@@ -0,0 +1,162 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.test;
+
+import org.apache.hadoop.yarn.webapp.Controller;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.View;
+import org.apache.hadoop.yarn.webapp.WebAppException;
+
+import java.lang.reflect.Method;
+import com.google.inject.Module;
+import com.google.inject.Scopes;
+import com.google.inject.servlet.RequestScoped;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Provides;
+
+import java.io.PrintWriter;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletRequest;
+
+
+import static org.mockito.Mockito.*;
+
+public class WebAppTests {
+
+ /**
+ * Create a mock injector for tests
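+ * <p>A minimal usage sketch (MyApi and MyApiImpl are hypothetical):
+ * <pre>
+ *   Injector injector = WebAppTests.createMockInjector(MyApi.class, new MyApiImpl());
+ *   HttpServletRequest req = injector.getInstance(HttpServletRequest.class);
+ * </pre>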
+ * @param <T> type of class/interface
+ * @param api the interface class of the object to inject
+ * @param impl the implementation object to inject
+ * @param modules additional guice modules
+ * @return an injector
+ */
+ public static <T> Injector createMockInjector(final Class<T> api,
+ final T impl,
+ final Module... modules) {
+ return Guice.createInjector(new AbstractModule() {
+ final PrintWriter writer = spy(new PrintWriter(System.out));
+ final HttpServletRequest request = createRequest();
+ final HttpServletResponse response = createResponse();
+
+ @Override
+ protected void configure() {
+ if (api != null) {
+ bind(api).toInstance(impl);
+ }
+ bindScope(RequestScoped.class, Scopes.SINGLETON);
+ if (modules != null) {
+ for (Module module : modules) {
+ install(module);
+ }
+ }
+ }
+
+ @Provides HttpServletRequest request() {
+ return request;
+ }
+
+ @Provides HttpServletResponse response() {
+ return response;
+ }
+
+ @Provides PrintWriter writer() {
+ return writer;
+ }
+
+ HttpServletRequest createRequest() {
+ // the default suffices for now
+ return mock(HttpServletRequest.class);
+ }
+
+ HttpServletResponse createResponse() {
+ try {
+ HttpServletResponse res = mock(HttpServletResponse.class);
+ when(res.getWriter()).thenReturn(writer);
+ return res;
+ } catch (Exception e) {
+ throw new WebAppException(e);
+ }
+ }
+ });
+ }
+
+ // convenience
+ @SuppressWarnings("unchecked")
+ public static <T> Injector createMockInjector(T impl) {
+ return createMockInjector((Class<T>)impl.getClass(), impl);
+ }
+
+ public static void flushOutput(Injector injector) {
+ HttpServletResponse res = injector.getInstance(HttpServletResponse.class);
+ try {
+ res.getWriter().flush();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
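+ /**
+ * Instantiates the controller through the mock injector and invokes the
+ * named no-arg action method reflectively. A usage sketch (MyController
+ * is hypothetical):
+ * <pre>
+ *   WebAppTests.testController(MyController.class, "index");
+ * </pre>
+ */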
+ public static <T> Injector testController(Class<? extends Controller> ctrlr,
+ String methodName, Class<T> api, T impl, Module... modules) {
+ try {
+ Injector injector = createMockInjector(api, impl, modules);
+ Method method = ctrlr.getMethod(methodName, (Class<?>[])null);
+ method.invoke(injector.getInstance(ctrlr), (Object[])null);
+ return injector;
+ } catch (Exception e) {
+ throw new WebAppException(e);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public static <T> Injector testController(Class<? extends Controller> ctrlr,
+ String methodName) {
+ return testController(ctrlr, methodName, null, null);
+ }
+
+ public static <T> Injector testPage(Class<? extends View> page, Class<T> api,
+ T impl, Module... modules) {
+ Injector injector = createMockInjector(api, impl, modules);
+ injector.getInstance(page).render();
+ flushOutput(injector);
+ return injector;
+ }
+
+ // convenience
+ @SuppressWarnings("unchecked")
+ public static <T> Injector testPage(Class<? extends View> page) {
+ return testPage(page, null, null);
+ }
+
+ public static <T> Injector testBlock(Class<? extends SubView> block,
+ Class<T> api, T impl, Module... modules) {
+ Injector injector = createMockInjector(api, impl, modules);
+ injector.getInstance(block).renderPartial();
+ flushOutput(injector);
+ return injector;
+ }
+
+ // convenience
+ @SuppressWarnings("unchecked")
+ public static <T> Injector testBlock(Class<? extends SubView> block) {
+ return testBlock(block, null, null);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestCommonViews.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestCommonViews.java
new file mode 100644
index 0000000..b533bce
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestCommonViews.java
@@ -0,0 +1,56 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import com.google.inject.Injector;
+
+import org.apache.hadoop.yarn.webapp.ResponseInfo;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.apache.hadoop.yarn.webapp.view.ErrorPage;
+import org.apache.hadoop.yarn.webapp.view.FooterBlock;
+import org.apache.hadoop.yarn.webapp.view.HeaderBlock;
+import org.apache.hadoop.yarn.webapp.view.JQueryUI;
+
+import org.junit.Test;
+import static org.mockito.Mockito.*;
+
+public class TestCommonViews {
+
+ @Test public void testErrorPage() {
+ WebAppTests.testPage(ErrorPage.class);
+ }
+
+ @Test public void testHeaderBlock() {
+ WebAppTests.testBlock(HeaderBlock.class);
+ }
+
+ @Test public void testFooterBlock() {
+ WebAppTests.testBlock(FooterBlock.class);
+ }
+
+ @Test public void testJQueryUI() {
+ WebAppTests.testBlock(JQueryUI.class);
+ }
+
+ @Test public void testInfoBlock() {
+ Injector injector = WebAppTests.createMockInjector(this);
+ injector.getInstance(ResponseInfo.class);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java
new file mode 100644
index 0000000..89042c6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java
@@ -0,0 +1,74 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import com.google.inject.Injector;
+
+import java.io.PrintWriter;
+
+import org.apache.hadoop.yarn.webapp.WebAppException;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.webapp.view.HtmlPage;
+
+import org.junit.Test;
+import static org.mockito.Mockito.*;
+
+public class TestHtmlBlock {
+ public static class TestBlock extends HtmlBlock {
+ @Override
+ public void render(Block html) {
+ html.
+ p("#testid")._("test note")._();
+ }
+ }
+
+ public static class ShortBlock extends HtmlBlock {
+ @Override
+ public void render(Block html) {
+ html.
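+ // the p() element is never closed with a trailing _(), so rendering
+ // should fail (see testShortBlock)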
+ p()._("should throw");
+ }
+ }
+
+ public static class ShortPage extends HtmlPage {
+ @Override
+ public void render(Page.HTML<_> html) {
+ html.
+ title("short test").
+ _(ShortBlock.class);
+ }
+ }
+
+ @Test public void testUsual() {
+ Injector injector = WebAppTests.testBlock(TestBlock.class);
+ PrintWriter out = injector.getInstance(PrintWriter.class);
+
+ verify(out).print(" id=\"testid\"");
+ verify(out).print("test note");
+ }
+
+ @Test(expected=WebAppException.class) public void testShortBlock() {
+ WebAppTests.testBlock(ShortBlock.class);
+ }
+
+ @Test(expected=WebAppException.class) public void testShortPage() {
+ WebAppTests.testPage(ShortPage.class);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java
new file mode 100644
index 0000000..d99384b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java
@@ -0,0 +1,64 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import com.google.inject.Injector;
+
+import java.io.PrintWriter;
+
+import org.apache.hadoop.yarn.webapp.WebAppException;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.apache.hadoop.yarn.webapp.view.HtmlPage;
+
+import org.junit.Test;
+import static org.mockito.Mockito.*;
+
+public class TestHtmlPage {
+
+ public static class TestView extends HtmlPage {
+ @Override
+ public void render(Page.HTML<_> html) {
+ html.
+ title("test").
+ p("#testid")._("test note")._()._();
+ }
+ }
+
+ public static class ShortView extends HtmlPage {
+ @Override
+ public void render(Page.HTML<_> html) {
+ html.
+ title("short test").
+ p()._("should throw");
+ }
+ }
+
+ @Test public void testUsual() {
+ Injector injector = WebAppTests.testPage(TestView.class);
+ PrintWriter out = injector.getInstance(PrintWriter.class);
+
+ verify(out).print("test");
+ verify(out).print(" id=\"testid\"");
+ verify(out).print("test note");
+ }
+
+ @Test(expected=WebAppException.class) public void testShort() {
+ WebAppTests.testPage(ShortView.class);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java
new file mode 100644
index 0000000..c4a960c2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java
@@ -0,0 +1,70 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import org.apache.hadoop.yarn.MockApps;
+import org.apache.hadoop.yarn.webapp.Controller;
+import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.apache.hadoop.yarn.webapp.view.HtmlPage;
+import org.apache.hadoop.yarn.webapp.view.TwoColumnCssLayout;
+import org.junit.Test;
+
+public class TestTwoColumnCssPage {
+
+ public static class TestController extends Controller {
+ @Override
+ public void index() {
+ set("title", "Testing a Two Column Layout");
+ set("ui.accordion.id", "nav");
+ set("ui.themeswitcher.id", "themeswitcher");
+ render(TwoColumnCssLayout.class);
+ }
+
+ public void names() {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < 8; ++i) {
+ sb.append(MockApps.newAppName()).append(' ');
+ }
+ setTitle(sb.toString());
+ }
+
+ public void textnames() {
+ names();
+ renderText($("title"));
+ }
+ }
+
+ public static class TestView extends HtmlPage {
+ @Override
+ public void render(Page.HTML<_> html) {
+ html.
+ title($("title")).
+ h1($("title"))._();
+ }
+ }
+
+ @Test public void shouldNotThrow() {
+ WebAppTests.testPage(TwoColumnCssLayout.class);
+ }
+
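+ // manual smoke test: serves the test pages at http://localhost:8888 in dev mode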
+ public static void main(String[] args) {
+ WebApps.$for("test").at(8888).inDevMode().start().joinThread();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnLayout.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnLayout.java
new file mode 100644
index 0000000..a3b1b26
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnLayout.java
@@ -0,0 +1,45 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.view;
+
+import org.apache.hadoop.yarn.webapp.Controller;
+import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.junit.Test;
+
+public class TestTwoColumnLayout {
+
+ public static class TestController extends Controller {
+ @Override
+ public void index() {
+ setTitle("Test the two column table layout");
+ set("ui.accordion.id", "nav");
+ set("ui.themeswitcher.id", "themeswitcher");
+ render(TwoColumnLayout.class);
+ }
+ }
+
+ @Test public void shouldNotThrow() {
+ WebAppTests.testPage(TwoColumnLayout.class);
+ }
+
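+ // manual smoke test: serves the test pages at http://localhost:8888 in dev mode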
+ public static void main(String[] args) {
+ WebApps.$for("test").at(8888).inDevMode().start().joinThread();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
new file mode 100644
index 0000000..eab8b6f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -0,0 +1,94 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-yarn-server</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${yarn.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-common</artifactId>
+ <name>hadoop-yarn-server-common</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
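+ <!-- protoc is not Maven-aware, so code generation happens in three steps:
+ create target/generated-sources/proto, run protoc over the .proto files,
+ then register the generated directory as an additional source root. -->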
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-protobuf-generated-sources-directory</id>
+ <phase>initialize</phase>
+ <configuration>
+ <target>
+ <mkdir dir="target/generated-sources/proto" />
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>generate-sources</id>
+ <phase>generate-sources</phase>
+ <configuration>
+ <executable>protoc</executable>
+ <arguments>
+ <argument>-I../../hadoop-yarn-api/src/main/proto/</argument>
+ <argument>-Isrc/main/proto/</argument>
+ <argument>--java_out=target/generated-sources/proto</argument>
+ <argument>src/main/proto/yarn_server_common_protos.proto</argument>
+ <argument>src/main/proto/yarn_server_common_service_protos.proto</argument>
+ <argument>src/main/proto/ResourceTracker.proto</argument>
+ </arguments>
+ </configuration>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>target/generated-sources/proto</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/avro/ResourceTracker.genavro b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/avro/ResourceTracker.genavro
new file mode 100644
index 0000000..b1da44f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/avro/ResourceTracker.genavro
@@ -0,0 +1,40 @@
+@namespace("org.apache.hadoop.yarn")
+protocol ResourceTracker {
+
+ import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro";
+
+ // ResourceTracker
+ record NodeID {
+ int id;
+ }
+
+ record NodeHealthStatus {
+ boolean isNodeHealthy;
+ union {string, null} healthReport;
+ long lastHealthReportTime;
+ }
+
+ record NodeStatus {
+ NodeID nodeId;
+ int responseId;
+ long lastSeen;
+ map<array<org.apache.hadoop.yarn.Container>> containers;
+ NodeHealthStatus nodeHealthStatus;
+ }
+
+ record RegistrationResponse {
+ NodeID nodeID;
+ union {bytes, null} secretKey;
+ }
+
+ record HeartbeatResponse {
+ int responseId;
+ boolean reboot;
+ array<org.apache.hadoop.yarn.Container> containersToCleanup;
+ array<org.apache.hadoop.yarn.ApplicationID> appplicationsToCleanup;
+ }
+
+ RegistrationResponse registerNodeManager(string node, org.apache.hadoop.yarn.Resource resource) throws YarnRemoteException;
+ HeartbeatResponse nodeHeartbeat(NodeStatus nodeStatus) throws YarnRemoteException;
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/NodeHealthCheckerService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/NodeHealthCheckerService.java
new file mode 100644
index 0000000..908fc21
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/NodeHealthCheckerService.java
@@ -0,0 +1,384 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Timer;
+import java.util.TimerTask;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Shell.ExitCodeException;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+/**
+ *
+ * The class that checks the health of a node by periodically running a
+ * configured health script and reporting the result back to the service
+ * that asked for the health check.
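+ *
+ * <p>A minimal usage sketch (the script path below is an example value):
+ * <pre>
+ *   Configuration conf = new Configuration();
+ *   conf.set(NodeHealthCheckerService.HEALTH_CHECK_SCRIPT_PROPERTY,
+ *       "/usr/local/bin/node-health.sh");
+ *   NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(conf);
+ *   healthChecker.start(); // runs the script at the configured interval
+ * </pre>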
+ */
+public class NodeHealthCheckerService extends AbstractService {
+
+ private static Log LOG = LogFactory.getLog(NodeHealthCheckerService.class);
+
+ /** Absolute path to the health script. */
+ private String nodeHealthScript;
+ /** Interval between executions of the node health script */
+ private long intervalTime;
+ /** Time after which a run of the script is considered to have timed out */
+ private long scriptTimeout;
+ /** Timer used to schedule node health monitoring script execution */
+ private Timer nodeHealthScriptScheduler;
+
+ /** ShellCommandExecutor used to execute monitoring script */
+ ShellCommandExecutor shexec = null;
+
+ /** Configuration used by the checker */
+ private Configuration conf;
+
+ /** Pattern used for searching in the output of the node health script */
+ static private final String ERROR_PATTERN = "ERROR";
+
+ /* Configuration keys */
+ public static final String HEALTH_CHECK_SCRIPT_PROPERTY =
+ "yarn.server.nodemanager.healthchecker.script.path";
+
+ public static final String HEALTH_CHECK_INTERVAL_PROPERTY =
+ "yarn.server.nodemanager.healthchecker.interval";
+
+ public static final String HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY =
+ "yarn.server.nodemanager.healthchecker.script.timeout";
+
+ public static final String HEALTH_CHECK_SCRIPT_ARGUMENTS_PROPERTY =
+ "yarn.server.nodemanager.healthchecker.script.args";
+
+ /* end of configuration keys */
+ /** Time out error message */
+ static final String NODE_HEALTH_SCRIPT_TIMED_OUT_MSG = "Node health script timed out";
+
+ /** Default frequency of running node health script */
+ private static final long DEFAULT_HEALTH_CHECK_INTERVAL = 10 * 60 * 1000;
+ /** Default script time out period */
+ private static final long DEFAULT_HEALTH_SCRIPT_FAILURE_INTERVAL = 2 * DEFAULT_HEALTH_CHECK_INTERVAL;
+
+ private boolean isHealthy;
+
+ private String healthReport;
+
+ private long lastReportedTime;
+
+ private TimerTask timer;
+
+
+ private enum HealthCheckerExitStatus {
+ SUCCESS,
+ TIMED_OUT,
+ FAILED_WITH_EXIT_CODE,
+ FAILED_WITH_EXCEPTION,
+ FAILED
+ }
+
+
+ /**
+ * Class which is used by the {@link Timer} class to periodically execute the
+ * node health script.
+ *
+ */
+ private class NodeHealthMonitorExecutor extends TimerTask {
+
+ String exceptionStackTrace = "";
+
+ public NodeHealthMonitorExecutor(String[] args) {
+ ArrayList<String> execScript = new ArrayList<String>();
+ execScript.add(nodeHealthScript);
+ if (args != null) {
+ execScript.addAll(Arrays.asList(args));
+ }
+ shexec = new ShellCommandExecutor(execScript
+ .toArray(new String[execScript.size()]), null, null, scriptTimeout);
+ }
+
+ @Override
+ public void run() {
+ HealthCheckerExitStatus status = HealthCheckerExitStatus.SUCCESS;
+ try {
+ shexec.execute();
+ } catch (ExitCodeException e) {
+ // ignore the exit code of the script
+ status = HealthCheckerExitStatus.FAILED_WITH_EXIT_CODE;
+ } catch (Exception e) {
+ LOG.warn("Caught exception : " + e.getMessage());
+ if (!shexec.isTimedOut()) {
+ status = HealthCheckerExitStatus.FAILED_WITH_EXCEPTION;
+ } else {
+ status = HealthCheckerExitStatus.TIMED_OUT;
+ }
+ exceptionStackTrace = StringUtils.stringifyException(e);
+ } finally {
+ if (status == HealthCheckerExitStatus.SUCCESS) {
+ if (hasErrors(shexec.getOutput())) {
+ status = HealthCheckerExitStatus.FAILED;
+ }
+ }
+ reportHealthStatus(status);
+ }
+ }
+
+ /**
+ * Parses the outcome of a node health script run and records the
+ * resulting health status.
+ *
+ * The node is marked unhealthy if
+ * <ol>
+ * <li>The node health script times out</li>
+ * <li>The node health script's output has a line which begins with ERROR</li>
+ * <li>An exception is thrown while executing the script</li>
+ * </ol>
+ * If the script merely exits with a non-zero code ({@link ExitCodeException}),
+ * its output is ignored and the node is left healthy, since the script
+ * itself might have a syntax error.
+ *
+ * @param status the exit status of the health script run
+ * @param status
+ */
+ void reportHealthStatus(HealthCheckerExitStatus status) {
+ long now = System.currentTimeMillis();
+ switch (status) {
+ case SUCCESS:
+ setHealthStatus(true, "", now);
+ break;
+ case TIMED_OUT:
+ setHealthStatus(false, NODE_HEALTH_SCRIPT_TIMED_OUT_MSG);
+ break;
+ case FAILED_WITH_EXCEPTION:
+ setHealthStatus(false, exceptionStackTrace);
+ break;
+ case FAILED_WITH_EXIT_CODE:
+ setHealthStatus(true, "", now);
+ break;
+ case FAILED:
+ setHealthStatus(false, shexec.getOutput());
+ break;
+ }
+ }
+
+ /**
+ * Checks whether the output string has a line which begins with ERROR.
+ *
+ * @param output
+ * the output of the health script
+ * @return true if the output contains the error pattern.
+ */
+ private boolean hasErrors(String output) {
+ String[] splits = output.split("\n");
+ for (String split : splits) {
+ if (split.startsWith(ERROR_PATTERN)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+
+ public NodeHealthCheckerService() {
+ super(NodeHealthCheckerService.class.getName());
+ this.lastReportedTime = System.currentTimeMillis();
+ this.isHealthy = true;
+ this.healthReport = "";
+ }
+
+ public NodeHealthCheckerService(Configuration conf) {
+ this();
+ init(conf);
+ }
+
+ /*
+ * Initializes the script path, execution interval and script timeout from the configuration.
+ */
+ @Override
+ public void init(Configuration conf) {
+ this.conf = conf;
+ this.nodeHealthScript =
+ conf.get(HEALTH_CHECK_SCRIPT_PROPERTY);
+ this.intervalTime = conf.getLong(HEALTH_CHECK_INTERVAL_PROPERTY,
+ DEFAULT_HEALTH_CHECK_INTERVAL);
+ this.scriptTimeout = conf.getLong(
+ HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY,
+ DEFAULT_HEALTH_SCRIPT_FAILURE_INTERVAL);
+ String[] args = conf.getStrings(HEALTH_CHECK_SCRIPT_ARGUMENTS_PROPERTY,
+ new String[] {});
+ timer = new NodeHealthMonitorExecutor(args);
+ }
+
+ /**
+ * Method used to start the Node health monitoring.
+ *
+ */
+ @Override
+ public void start() {
+ // if health script path is not configured don't start the thread.
+ if (!shouldRun(conf)) {
+ LOG.info("Not starting node health monitor");
+ return;
+ }
+ nodeHealthScriptScheduler = new Timer("NodeHealthMonitor-Timer", true);
+ // Start the timer task immediately and
+ // then periodically at interval time.
+ nodeHealthScriptScheduler.scheduleAtFixedRate(timer, 0, intervalTime);
+ }
+
+ /**
+ * Method used to terminate the node health monitoring service.
+ *
+ */
+ @Override
+ public void stop() {
+ if (!shouldRun(conf)) {
+ return;
+ }
+ nodeHealthScriptScheduler.cancel();
+ if (shexec != null) {
+ Process p = shexec.getProcess();
+ if (p != null) {
+ p.destroy();
+ }
+ }
+ }
+
+ /**
+ * Returns whether the node is currently healthy.
+ *
+ * @return true if the node is healthy
+ */
+ private boolean isHealthy() {
+ return isHealthy;
+ }
+
+ /**
+ * Sets whether the node is healthy.
+ *
+ * @param isHealthy
+ * whether the node is healthy
+ */
+ private synchronized void setHealthy(boolean isHealthy) {
+ this.isHealthy = isHealthy;
+ }
+
+ /**
+ * Returns the output of the health script. If the node is healthy, an empty
+ * string is returned.
+ *
+ * @return output from health script
+ */
+ private String getHealthReport() {
+ return healthReport;
+ }
+
+ /**
+ * Sets the health report from the node health script.
+ *
+ * @param healthReport
+ */
+ private synchronized void setHealthReport(String healthReport) {
+ this.healthReport = healthReport;
+ }
+
+ /**
+ * Returns time stamp when node health script was last run.
+ *
+ * @return timestamp when node health script was last run
+ */
+ private long getLastReportedTime() {
+ return lastReportedTime;
+ }
+
+ /**
+ * Sets the last run time of the node health script.
+ *
+ * @param lastReportedTime
+ */
+ private synchronized void setLastReportedTime(long lastReportedTime) {
+ this.lastReportedTime = lastReportedTime;
+ }
+
+ /**
+ * Determines whether the node health monitoring service should be started.
+ * Returns true if the following conditions are met:
+ *
+ * <ol>
+ * <li>The path to the node health check script is not empty</li>
+ * <li>The node health check script file exists and is executable</li>
+ * </ol>
+ *
+ * @param conf
+ * @return true if node health monitoring service can be started.
+ */
+ public static boolean shouldRun(Configuration conf) {
+ String nodeHealthScript =
+ conf.get(HEALTH_CHECK_SCRIPT_PROPERTY);
+ if (nodeHealthScript == null || nodeHealthScript.trim().isEmpty()) {
+ return false;
+ }
+ File f = new File(nodeHealthScript);
+ return f.exists() && f.canExecute();
+ }
+
+ private synchronized void setHealthStatus(boolean isHealthy, String output) {
+ this.setHealthy(isHealthy);
+ this.setHealthReport(output);
+ }
+
+ private synchronized void setHealthStatus(boolean isHealthy, String output,
+ long time) {
+ this.setHealthStatus(isHealthy, output);
+ this.setLastReportedTime(time);
+ }
+
+ /**
+ * Populates the given {@link NodeHealthStatus} with the node's current health state.
+ *
+ * @param healthStatus
+ */
+ public synchronized void setHealthStatus(NodeHealthStatus healthStatus) {
+ healthStatus.setIsNodeHealthy(this.isHealthy());
+ healthStatus.setHealthReport(this.getHealthReport());
+ healthStatus.setLastHealthReportTime(this.getLastReportedTime());
+ }
+
+ /**
+ * Test hook that exposes the timer task used by the node health checker.
+ * Not to be used outside of tests.
+ *
+ * @return the health-monitoring timer task
+ */
+ TimerTask getTimer() {
+ return timer;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java
new file mode 100644
index 0000000..84e41ce
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java
@@ -0,0 +1,133 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.lib;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.Stat;
+
+/** ZK Registration Library
+ * currently does not use any authorization
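+ *
+ * <p>A usage sketch (connection string and paths are example values):
+ * <pre>
+ *   ZKClient zk = new ZKClient("localhost:2181");
+ *   zk.registerService("/services/node-1", "host:port");
+ *   List&lt;String&gt; names = zk.listServices("/services");
+ *   zk.unregisterService("/services/node-1");
+ * </pre>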
+ */
+public class ZKClient {
+ private ZooKeeper zkClient;
+
+ /**
+ * Creates a zookeeper client to talk to the given ensemble.
+ * @param string the zookeeper connection string (host:port)
+ * @throws IOException if the connection to zookeeper cannot be established
+ */
+ public ZKClient(String string) throws IOException {
+ zkClient = new ZooKeeper(string, 30000, new ZKWatcher());
+ }
+
+ /**
+ * register the service to a specific path
+ * @param path the path in zookeeper namespace to register to
+ * @param data the data that is part of this registration
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public void registerService(String path, String data) throws
+ IOException, InterruptedException {
+ try {
+ zkClient.create(path, data.getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE,
+ CreateMode.EPHEMERAL);
+ } catch(KeeperException ke) {
+ throw new IOException(ke);
+ }
+ }
+
+ /**
+ * unregister the service.
+ * @param path the path at which the service was registered
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public void unregisterService(String path) throws IOException,
+ InterruptedException {
+ try {
+ zkClient.delete(path, -1);
+ } catch(KeeperException ke) {
+ throw new IOException(ke);
+ }
+ }
+
+ /**
+ * list the services registered under a path
+ * @param path the path under which services are
+ * registered
+ * @return the list of names of services registered
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public List<String> listServices(String path) throws IOException,
+ InterruptedException {
+ List<String> children = null;
+ try {
+ children = zkClient.getChildren(path, false);
+ } catch(KeeperException ke) {
+ throw new IOException(ke);
+ }
+ return children;
+ }
+
+ /**
+ * get data published by the service at the registration address
+ * @param path the path where the service is registered
+ * @return the data of the registered service
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public String getServiceData(String path) throws IOException,
+ InterruptedException {
+ String data;
+ try {
+ Stat stat = new Stat();
+ byte[] byteData = zkClient.getData(path, false, stat);
+ data = new String(byteData);
+ } catch(KeeperException ke) {
+ throw new IOException(ke);
+ }
+ return data;
+ }
+
+
+ /**
+ * a watcher class that handles events from
+ * zookeeper; currently all events are ignored.
+ *
+ */
+ private static class ZKWatcher implements Watcher {
+
+ @Override
+ public void process(WatchedEvent arg0) {
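+ // no-op: events are currently ignored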
+
+ }
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java
new file mode 100644
index 0000000..39861d4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java
@@ -0,0 +1,61 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.proto.ResourceTracker;
+
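+/**
+ * Supplies Kerberos principal information for the ResourceTracker protocol:
+ * the ResourceManager is the server principal and the NodeManager the client
+ * principal. Only the protobuf blocking interface of ResourceTrackerService
+ * is recognized; other protocols get null.
+ */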
+public class RMNMSecurityInfoClass extends SecurityInfo {
+
+ @Override
+ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+ if (!protocol.equals(ResourceTracker.ResourceTrackerService.BlockingInterface.class)) {
+ return null;
+ }
+ return new KerberosInfo() {
+
+ @Override
+ public Class<? extends Annotation> annotationType() {
+ return null;
+ }
+
+ @Override
+ public String serverPrincipal() {
+ return YarnConfiguration.RM_SERVER_PRINCIPAL_KEY;
+ }
+
+ @Override
+ public String clientPrincipal() {
+ return YarnServerConfig.NM_SERVER_PRINCIPAL_KEY;
+ }
+ };
+ }
+
+ @Override
+ public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+ return null;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/YarnServerConfig.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/YarnServerConfig.java
new file mode 100644
index 0000000..2d7b561
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/YarnServerConfig.java
@@ -0,0 +1,30 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+public class YarnServerConfig {
+ public static final String NM_SERVER_PRINCIPAL_KEY =
+ "yarn.nodemanager.principal";
+ public static final String RESOURCETRACKER_ADDRESS =
+ YarnConfiguration.RM_PREFIX + "resourcetracker.address";
+ public static final String DEFAULT_RESOURCETRACKER_BIND_ADDRESS =
+ "0.0.0.0:8020";
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java
new file mode 100644
index 0000000..a45dcb3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.api;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+
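+/**
+ * The protocol between a NodeManager and the ResourceManager: a node
+ * registers once, then reports its status through periodic heartbeats.
+ * A sketch of the expected call sequence:
+ * <pre>
+ *   RegisterNodeManagerResponse registration =
+ *       tracker.registerNodeManager(registerRequest);
+ *   while (nodeIsUp) {
+ *     NodeHeartbeatResponse response = tracker.nodeHeartbeat(heartbeatRequest);
+ *   }
+ * </pre>
+ */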
+public interface ResourceTracker {
+
+ public RegisterNodeManagerResponse registerNodeManager(RegisterNodeManagerRequest request) throws YarnRemoteException;
+ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnRemoteException;
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
new file mode 100644
index 0000000..76a0817
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
@@ -0,0 +1,70 @@
+package org.apache.hadoop.yarn.server.api.impl.pb.client;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
+import org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
+
+import com.google.protobuf.ServiceException;
+
+public class ResourceTrackerPBClientImpl implements ResourceTracker {
+
+ private ResourceTrackerService.BlockingInterface proxy;
+
+ public ResourceTrackerPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
+ RPC.setProtocolEngine(conf, ResourceTrackerService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
+ proxy = (ResourceTrackerService.BlockingInterface)RPC.getProxy(
+ ResourceTrackerService.BlockingInterface.class, clientVersion, addr, conf);
+ }
+
+ @Override
+ public RegisterNodeManagerResponse registerNodeManager(
+ RegisterNodeManagerRequest request) throws YarnRemoteException {
+ RegisterNodeManagerRequestProto requestProto = ((RegisterNodeManagerRequestPBImpl)request).getProto();
+ try {
+ return new RegisterNodeManagerResponsePBImpl(proxy.registerNodeManager(null, requestProto));
+ } catch (ServiceException e) {
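+ // the RPC layer wraps remote failures in ServiceException; unwrap the
+ // original cause where possible so callers see the real exception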
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
+ throws YarnRemoteException {
+ NodeHeartbeatRequestProto requestProto = ((NodeHeartbeatRequestPBImpl)request).getProto();
+ try {
+ return new NodeHeartbeatResponsePBImpl(proxy.nodeHeartbeat(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
new file mode 100644
index 0000000..3ada4b2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
@@ -0,0 +1,53 @@
+package org.apache.hadoop.yarn.server.api.impl.pb.service;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService.BlockingInterface;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
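+/**
+ * Server-side adapter: translates protobuf requests into the
+ * {@link ResourceTracker} API and wraps any {@link YarnRemoteException}
+ * back into a ServiceException for the RPC layer.
+ */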
+public class ResourceTrackerPBServiceImpl implements BlockingInterface {
+
+ private ResourceTracker real;
+
+ public ResourceTrackerPBServiceImpl(ResourceTracker impl) {
+ this.real = impl;
+ }
+
+ @Override
+ public RegisterNodeManagerResponseProto registerNodeManager(
+ RpcController controller, RegisterNodeManagerRequestProto proto)
+ throws ServiceException {
+ RegisterNodeManagerRequestPBImpl request = new RegisterNodeManagerRequestPBImpl(proto);
+ try {
+ RegisterNodeManagerResponse response = real.registerNodeManager(request);
+ return ((RegisterNodeManagerResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public NodeHeartbeatResponseProto nodeHeartbeat(RpcController controller,
+ NodeHeartbeatRequestProto proto) throws ServiceException {
+ NodeHeartbeatRequestPBImpl request = new NodeHeartbeatRequestPBImpl(proto);
+ try {
+ NodeHeartbeatResponse response = real.nodeHeartbeat(request);
+ return ((NodeHeartbeatResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
new file mode 100644
index 0000000..7072f47
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
@@ -0,0 +1,10 @@
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+
+
+public interface NodeHeartbeatRequest {
+ public abstract NodeStatus getNodeStatus();
+
+ public abstract void setNodeStatus(NodeStatus status);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
new file mode 100644
index 0000000..4440260
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+
+public interface NodeHeartbeatResponse {
+ public abstract HeartbeatResponse getHeartbeatResponse();
+
+ public abstract void setHeartbeatResponse(HeartbeatResponse heartbeatResponse);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
new file mode 100644
index 0000000..ac9ee77
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
@@ -0,0 +1,32 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+public interface RegisterNodeManagerRequest {
+ NodeId getNodeId();
+ int getHttpPort();
+ Resource getResource();
+
+ void setNodeId(NodeId nodeId);
+ void setHttpPort(int port);
+ void setResource(Resource resource);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerResponse.java
new file mode 100644
index 0000000..ff25500
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerResponse.java
@@ -0,0 +1,10 @@
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
+
+public interface RegisterNodeManagerResponse {
+ public abstract RegistrationResponse getRegistrationResponse();
+
+ public abstract void setRegistrationResponse(RegistrationResponse registrationResponse);
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
new file mode 100644
index 0000000..3dd3749
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl;
+
+public class NodeHeartbeatRequestPBImpl extends ProtoBase<NodeHeartbeatRequestProto> implements NodeHeartbeatRequest {
+ NodeHeartbeatRequestProto proto = NodeHeartbeatRequestProto.getDefaultInstance();
+ NodeHeartbeatRequestProto.Builder builder = null;
+ boolean viaProto = false;
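+ // Usual PBImpl caching pattern: reads go through either the immutable proto
+ // or the builder (viaProto tracks which is current), and the locally cached
+ // nodeStatus is merged back into the builder before getProto() rebuilds the
+ // proto.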
+
+ private NodeStatus nodeStatus = null;
+
+
+ public NodeHeartbeatRequestPBImpl() {
+ builder = NodeHeartbeatRequestProto.newBuilder();
+ }
+
+ public NodeHeartbeatRequestPBImpl(NodeHeartbeatRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public NodeHeartbeatRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.nodeStatus != null) {
+ builder.setNodeStatus(convertToProtoFormat(this.nodeStatus));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = NodeHeartbeatRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public NodeStatus getNodeStatus() {
+ NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.nodeStatus != null) {
+ return this.nodeStatus;
+ }
+ if (!p.hasNodeStatus()) {
+ return null;
+ }
+ this.nodeStatus = convertFromProtoFormat(p.getNodeStatus());
+ return this.nodeStatus;
+ }
+
+ @Override
+ public void setNodeStatus(NodeStatus nodeStatus) {
+ maybeInitBuilder();
+ if (nodeStatus == null)
+ builder.clearNodeStatus();
+ this.nodeStatus = nodeStatus;
+ }
+
+ private NodeStatusPBImpl convertFromProtoFormat(NodeStatusProto p) {
+ return new NodeStatusPBImpl(p);
+ }
+
+ private NodeStatusProto convertToProtoFormat(NodeStatus t) {
+ return ((NodeStatusPBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
new file mode 100644
index 0000000..a3d9a1d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.HeartbeatResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.impl.pb.HeartbeatResponsePBImpl;
+
+public class NodeHeartbeatResponsePBImpl extends ProtoBase<NodeHeartbeatResponseProto> implements NodeHeartbeatResponse {
+ NodeHeartbeatResponseProto proto = NodeHeartbeatResponseProto.getDefaultInstance();
+ NodeHeartbeatResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private HeartbeatResponse heartbeatResponse = null;
+
+
+ public NodeHeartbeatResponsePBImpl() {
+ builder = NodeHeartbeatResponseProto.newBuilder();
+ }
+
+ public NodeHeartbeatResponsePBImpl(NodeHeartbeatResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public NodeHeartbeatResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.heartbeatResponse != null) {
+ builder.setHeartbeatResponse(convertToProtoFormat(this.heartbeatResponse));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = NodeHeartbeatResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public HeartbeatResponse getHeartbeatResponse() {
+ NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.heartbeatResponse != null) {
+ return this.heartbeatResponse;
+ }
+ if (!p.hasHeartbeatResponse()) {
+ return null;
+ }
+ this.heartbeatResponse = convertFromProtoFormat(p.getHeartbeatResponse());
+ return this.heartbeatResponse;
+ }
+
+ @Override
+ public void setHeartbeatResponse(HeartbeatResponse heartbeatResponse) {
+ maybeInitBuilder();
+ if (heartbeatResponse == null)
+ builder.clearHeartbeatResponse();
+ this.heartbeatResponse = heartbeatResponse;
+ }
+
+ private HeartbeatResponsePBImpl convertFromProtoFormat(HeartbeatResponseProto p) {
+ return new HeartbeatResponsePBImpl(p);
+ }
+
+ private HeartbeatResponseProto convertToProtoFormat(HeartbeatResponse t) {
+ return ((HeartbeatResponsePBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
new file mode 100644
index 0000000..b915d47
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
@@ -0,0 +1,142 @@
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+
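+/**
+ * PB-backed record for NodeManager registration: carries the node id, the
+ * NM's HTTP port, and the resource the node advertises to the RM.
+ */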
+public class RegisterNodeManagerRequestPBImpl extends ProtoBase<RegisterNodeManagerRequestProto> implements RegisterNodeManagerRequest {
+ RegisterNodeManagerRequestProto proto = RegisterNodeManagerRequestProto.getDefaultInstance();
+ RegisterNodeManagerRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private Resource resource = null;
+ private NodeId nodeId = null;
+
+ public RegisterNodeManagerRequestPBImpl() {
+ builder = RegisterNodeManagerRequestProto.newBuilder();
+ }
+
+ public RegisterNodeManagerRequestPBImpl(RegisterNodeManagerRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RegisterNodeManagerRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.resource != null) {
+ builder.setResource(convertToProtoFormat(this.resource));
+ }
+ if (this.nodeId != null) {
+ builder.setNodeId(convertToProtoFormat(this.nodeId));
+ }
+
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = RegisterNodeManagerRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public Resource getResource() {
+ RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.resource != null) {
+ return this.resource;
+ }
+ if (!p.hasResource()) {
+ return null;
+ }
+ this.resource = convertFromProtoFormat(p.getResource());
+ return this.resource;
+ }
+
+ @Override
+ public void setResource(Resource resource) {
+ maybeInitBuilder();
+ if (resource == null)
+ builder.clearResource();
+ this.resource = resource;
+ }
+
+ @Override
+ public NodeId getNodeId() {
+ RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.nodeId != null) {
+ return this.nodeId;
+ }
+ if (!p.hasNodeId()) {
+ return null;
+ }
+ this.nodeId = convertFromProtoFormat(p.getNodeId());
+ return this.nodeId;
+ }
+
+ @Override
+ public void setNodeId(NodeId nodeId) {
+ maybeInitBuilder();
+ if (nodeId == null)
+ builder.clearNodeId();
+ this.nodeId = nodeId;
+ }
+
+ @Override
+ public int getHttpPort() {
+ RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasHttpPort()) {
+ return 0;
+ }
+ return (p.getHttpPort());
+ }
+
+ @Override
+ public void setHttpPort(int httpPort) {
+ maybeInitBuilder();
+ builder.setHttpPort(httpPort);
+ }
+
+ private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
+ return new NodeIdPBImpl(p);
+ }
+
+ private NodeIdProto convertToProtoFormat(NodeId t) {
+ return ((NodeIdPBImpl)t).getProto();
+ }
+
+ private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
+ return new ResourcePBImpl(p);
+ }
+
+ private ResourceProto convertToProtoFormat(Resource t) {
+ return ((ResourcePBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerResponsePBImpl.java
new file mode 100644
index 0000000..47f5412
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerResponsePBImpl.java
@@ -0,0 +1,97 @@
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.RegistrationResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
+import org.apache.hadoop.yarn.server.api.records.impl.pb.RegistrationResponsePBImpl;
+
+
+
+public class RegisterNodeManagerResponsePBImpl extends ProtoBase<RegisterNodeManagerResponseProto> implements RegisterNodeManagerResponse {
+ RegisterNodeManagerResponseProto proto = RegisterNodeManagerResponseProto.getDefaultInstance();
+ RegisterNodeManagerResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private RegistrationResponse registrationResponse = null;
+
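+ // rebuild marks the cached response as possibly diverging from the proto.
+ // It is raised on set, and also on get, because the returned record is
+ // mutable; getProto() re-merges only while the flag is up.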
+ private boolean rebuild = false;
+
+ public RegisterNodeManagerResponsePBImpl() {
+ builder = RegisterNodeManagerResponseProto.newBuilder();
+ }
+
+ public RegisterNodeManagerResponsePBImpl(RegisterNodeManagerResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RegisterNodeManagerResponseProto getProto() {
+ if (rebuild)
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.registrationResponse != null) {
+ builder.setRegistrationResponse(convertToProtoFormat(this.registrationResponse));
+ this.registrationResponse = null;
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ rebuild = false;
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = RegisterNodeManagerResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public RegistrationResponse getRegistrationResponse() {
+ RegisterNodeManagerResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.registrationResponse != null) {
+ return this.registrationResponse;
+ }
+ if (!p.hasRegistrationResponse()) {
+ return null;
+ }
+ this.registrationResponse = convertFromProtoFormat(p.getRegistrationResponse());
+ rebuild = true;
+ return this.registrationResponse;
+ }
+
+ @Override
+ public void setRegistrationResponse(RegistrationResponse registrationResponse) {
+ maybeInitBuilder();
+ if (registrationResponse == null)
+ builder.clearRegistrationResponse();
+ this.registrationResponse = registrationResponse;
+ rebuild = true;
+ }
+
+ private RegistrationResponsePBImpl convertFromProtoFormat(RegistrationResponseProto p) {
+ return new RegistrationResponsePBImpl(p);
+ }
+
+ private RegistrationResponseProto convertToProtoFormat(RegistrationResponse t) {
+ return ((RegistrationResponsePBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/HeartbeatResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/HeartbeatResponse.java
new file mode 100644
index 0000000..8ca390b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/HeartbeatResponse.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.api.records;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
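+/**
+ * The RM's reply to a NodeManager heartbeat: a response id, a reboot flag,
+ * and the lists of containers and applications the node should clean up.
+ */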
+public interface HeartbeatResponse {
+ int getResponseId();
+ boolean getReboot();
+
+ List<ContainerId> getContainersToCleanupList();
+ ContainerId getContainerToCleanup(int index);
+ int getContainersToCleanupCount();
+
+ List<ApplicationId> getApplicationsToCleanupList();
+ ApplicationId getApplicationsToCleanup(int index);
+ int getApplicationsToCleanupCount();
+
+ void setResponseId(int responseId);
+ void setReboot(boolean reboot);
+
+ void addAllContainersToCleanup(List<ContainerId> containers);
+ void addContainerToCleanup(ContainerId container);
+ void removeContainerToCleanup(int index);
+ void clearContainersToCleanup();
+
+ void addAllApplicationsToCleanup(List<ApplicationId> applications);
+ void addApplicationToCleanup(ApplicationId applicationId);
+ void removeApplicationToCleanup(int index);
+ void clearApplicationsToCleanup();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
new file mode 100644
index 0000000..7965b5c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.api.records;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+
+
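+/**
+ * The status a NodeManager reports on each heartbeat: its node id, a response
+ * id, the containers it runs grouped by application, and its health status.
+ */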
+public interface NodeStatus {
+
+ public abstract NodeId getNodeId();
+ public abstract int getResponseId();
+
+ public abstract Map<ApplicationId, List<Container>> getAllContainers();
+ public abstract List<Container> getContainers(ApplicationId key);
+
+ public abstract NodeHealthStatus getNodeHealthStatus();
+ public abstract void setNodeHealthStatus(NodeHealthStatus healthStatus);
+
+ public abstract void setNodeId(NodeId nodeId);
+ public abstract void setResponseId(int responseId);
+
+ public abstract void addAllContainers(Map<ApplicationId, List<Container>> containers);
+ public abstract void setContainers(ApplicationId key, List<Container> containers);
+ public abstract void removeContainers(ApplicationId key);
+ public abstract void clearContainers();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/RegistrationResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/RegistrationResponse.java
new file mode 100644
index 0000000..a237d90
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/RegistrationResponse.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.api.records;
+
+import java.nio.ByteBuffer;
+
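+/**
+ * Returned to a NodeManager on registration; the secret key is the material
+ * the NM stores for verifying container tokens (see
+ * ContainerTokenSecretManager.setSecretKey).
+ */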
+public interface RegistrationResponse {
+ public abstract ByteBuffer getSecretKey();
+
+ public abstract void setSecretKey(ByteBuffer secretKey);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/HeartbeatResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/HeartbeatResponsePBImpl.java
new file mode 100644
index 0000000..4bb5edd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/HeartbeatResponsePBImpl.java
@@ -0,0 +1,284 @@
+package org.apache.hadoop.yarn.server.api.records.impl.pb;
+
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.HeartbeatResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.HeartbeatResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+
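+/**
+ * PB-backed HeartbeatResponse. The repeated proto fields are copied into
+ * local lists on first access and written back wholesale in getProto(), so
+ * any list accessor pays a one-time full deserialization.
+ */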
+public class HeartbeatResponsePBImpl extends ProtoBase<HeartbeatResponseProto> implements HeartbeatResponse {
+ HeartbeatResponseProto proto = HeartbeatResponseProto.getDefaultInstance();
+ HeartbeatResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private List<ContainerId> containersToCleanup = null;
+
+ private List<ApplicationId> applicationsToCleanup = null;
+
+
+ public HeartbeatResponsePBImpl() {
+ builder = HeartbeatResponseProto.newBuilder();
+ }
+
+ public HeartbeatResponsePBImpl(HeartbeatResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public HeartbeatResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.containersToCleanup != null) {
+ addContainersToCleanupToProto();
+ }
+ if (this.applicationsToCleanup != null) {
+ addApplicationsToCleanupToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = HeartbeatResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public int getResponseId() {
+ HeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getResponseId());
+ }
+
+ @Override
+ public void setResponseId(int responseId) {
+ maybeInitBuilder();
+ builder.setResponseId((responseId));
+ }
+ @Override
+ public boolean getReboot() {
+ HeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getReboot());
+ }
+
+ @Override
+ public void setReboot(boolean reboot) {
+ maybeInitBuilder();
+ builder.setReboot((reboot));
+ }
+ @Override
+ public List<ContainerId> getContainersToCleanupList() {
+ initContainersToCleanup();
+ return this.containersToCleanup;
+ }
+ @Override
+ public ContainerId getContainerToCleanup(int index) {
+ initContainersToCleanup();
+ return this.containersToCleanup.get(index);
+ }
+ @Override
+ public int getContainersToCleanupCount() {
+ initContainersToCleanup();
+ return this.containersToCleanup.size();
+ }
+
+ private void initContainersToCleanup() {
+ if (this.containersToCleanup != null) {
+ return;
+ }
+ HeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<ContainerIdProto> list = p.getContainersToCleanupList();
+ this.containersToCleanup = new ArrayList<ContainerId>();
+
+ for (ContainerIdProto c : list) {
+ this.containersToCleanup.add(convertFromProtoFormat(c));
+ }
+ }
+
+ @Override
+ public void addAllContainersToCleanup(final List<ContainerId> containersToCleanup) {
+ if (containersToCleanup == null)
+ return;
+ initContainersToCleanup();
+ this.containersToCleanup.addAll(containersToCleanup);
+ }
+
+ private void addContainersToCleanupToProto() {
+ maybeInitBuilder();
+ builder.clearContainersToCleanup();
+ if (containersToCleanup == null)
+ return;
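+ // Expose the local list as a lazily converting view so each ContainerId
+ // is turned into its proto only as the builder consumes it.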
+ Iterable<ContainerIdProto> iterable = new Iterable<ContainerIdProto>() {
+ @Override
+ public Iterator<ContainerIdProto> iterator() {
+ return new Iterator<ContainerIdProto>() {
+
+ Iterator<ContainerId> iter = containersToCleanup.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public ContainerIdProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllContainersToCleanup(iterable);
+ }
+ @Override
+ public void addContainerToCleanup(ContainerId containersToCleanup) {
+ initContainersToCleanup();
+ this.containersToCleanup.add(containersToCleanup);
+ }
+ @Override
+ public void removeContainerToCleanup(int index) {
+ initContainersToCleanup();
+ this.containersToCleanup.remove(index);
+ }
+ @Override
+ public void clearContainersToCleanup() {
+ initContainersToCleanup();
+ this.containersToCleanup.clear();
+ }
+ @Override
+ public List<ApplicationId> getApplicationsToCleanupList() {
+ initApplicationsToCleanup();
+ return this.applicationsToCleanup;
+ }
+ @Override
+ public ApplicationId getApplicationsToCleanup(int index) {
+ initApplicationsToCleanup();
+ return this.applicationsToCleanup.get(index);
+ }
+ @Override
+ public int getApplicationsToCleanupCount() {
+ initApplicationsToCleanup();
+ return this.applicationsToCleanup.size();
+ }
+
+ private void initApplicationsToCleanup() {
+ if (this.applicationsToCleanup != null) {
+ return;
+ }
+ HeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<ApplicationIdProto> list = p.getApplicationsToCleanupList();
+ this.applicationsToCleanup = new ArrayList<ApplicationId>();
+
+ for (ApplicationIdProto c : list) {
+ this.applicationsToCleanup.add(convertFromProtoFormat(c));
+ }
+ }
+
+ @Override
+ public void addAllApplicationsToCleanup(final List<ApplicationId> applicationsToCleanup) {
+ if (applicationsToCleanup == null)
+ return;
+ initApplicationsToCleanup();
+ this.applicationsToCleanup.addAll(applicationsToCleanup);
+ }
+
+ private void addApplicationsToCleanupToProto() {
+ maybeInitBuilder();
+ builder.clearApplicationsToCleanup();
+ if (applicationsToCleanup == null)
+ return;
+ Iterable<ApplicationIdProto> iterable = new Iterable<ApplicationIdProto>() {
+ @Override
+ public Iterator<ApplicationIdProto> iterator() {
+ return new Iterator<ApplicationIdProto>() {
+
+ Iterator<ApplicationId> iter = applicationsToCleanup.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public ApplicationIdProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllApplicationsToCleanup(iterable);
+ }
+ @Override
+ public void addApplicationToCleanup(ApplicationId applicationsToCleanup) {
+ initApplicationsToCleanup();
+ this.applicationsToCleanup.add(applicationsToCleanup);
+ }
+ @Override
+ public void removeApplicationToCleanup(int index) {
+ initApplicationsToCleanup();
+ this.applicationsToCleanup.remove(index);
+ }
+ @Override
+ public void clearApplicationsToCleanup() {
+ initApplicationsToCleanup();
+ this.applicationsToCleanup.clear();
+ }
+
+ private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+ return new ContainerIdPBImpl(p);
+ }
+
+ private ContainerIdProto convertToProtoFormat(ContainerId t) {
+ return ((ContainerIdPBImpl)t).getProto();
+ }
+
+ private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
new file mode 100644
index 0000000..11eb642
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
@@ -0,0 +1,302 @@
+package org.apache.hadoop.yarn.server.api.records.impl.pb;
+
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeHealthStatusPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeHealthStatusProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ApplicationIdContainerListMapProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ContainerListProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+
+public class NodeStatusPBImpl extends ProtoBase<NodeStatusProto> implements NodeStatus {
+ NodeStatusProto proto = NodeStatusProto.getDefaultInstance();
+ NodeStatusProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private NodeId nodeId = null;
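+ // Keyed by the proto form of the application id: generated protos have
+ // value-based equals/hashCode (java_generate_equals_and_hash), so they are
+ // safe map keys without converting back to ApplicationId first.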
+ private Map<ApplicationIdProto, List<Container>> containers = null;
+ private NodeHealthStatus nodeHealthStatus = null;
+
+ public NodeStatusPBImpl() {
+ builder = NodeStatusProto.newBuilder();
+ }
+
+ public NodeStatusPBImpl(NodeStatusProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public NodeStatusProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.nodeId != null) {
+ builder.setNodeId(convertToProtoFormat(this.nodeId));
+ }
+ if (this.containers != null) {
+ addContainersToProto();
+ }
+ if (this.nodeHealthStatus != null) {
+ builder.setNodeHealthStatus(convertToProtoFormat(this.nodeHealthStatus));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = NodeStatusProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public int getResponseId() {
+ NodeStatusProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getResponseId();
+ }
+ @Override
+ public void setResponseId(int responseId) {
+ maybeInitBuilder();
+ builder.setResponseId(responseId);
+ }
+ @Override
+ public NodeId getNodeId() {
+ NodeStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.nodeId != null) {
+ return this.nodeId;
+ }
+ if (!p.hasNodeId()) {
+ return null;
+ }
+ this.nodeId = convertFromProtoFormat(p.getNodeId());
+
+ return this.nodeId;
+ }
+ @Override
+ public void setNodeId(NodeId nodeId) {
+ maybeInitBuilder();
+ if (nodeId == null)
+ builder.clearNodeId();
+ this.nodeId = nodeId;
+
+ }
+
+ @Override
+ public Map<ApplicationId, List<Container>> getAllContainers() {
+ initContainers();
+ HashMap<ApplicationId, List<Container>> returnMap = new HashMap<ApplicationId, List<Container>>(
+ this.containers.size());
+ for (Entry<ApplicationIdProto, List<Container>> entry : this.containers.entrySet()) {
+ returnMap.put(convertFromProtoFormat(entry.getKey()), entry.getValue());
+ }
+ return returnMap;
+ }
+
+ @Override
+ public List<Container> getContainers(ApplicationId applicationId) {
+ initContainers();
+ ApplicationIdProto applicationIdProto = convertToProtoFormat(applicationId);
+ if (this.containers.get(applicationIdProto) == null) {
+ this.containers.put(applicationIdProto, new ArrayList<Container>());
+ }
+ return this.containers.get(applicationIdProto);
+ }
+
+ private void initContainers() {
+ if (this.containers != null) {
+ return;
+ }
+ NodeStatusProtoOrBuilder p = viaProto ? proto : builder;
+ List<ApplicationIdContainerListMapProto> list = p.getContainersList();
+ this.containers = new HashMap<ApplicationIdProto, List<Container>>();
+
+ for (ApplicationIdContainerListMapProto c : list) {
+ this.containers.put(c.getApplicationId(), convertFromProtoFormat(c.getValue()));
+ }
+
+ }
+
+ @Override
+ public void addAllContainers(final Map<ApplicationId, List<Container>> containers) {
+ if (containers == null)
+ return;
+ initContainers();
+ for (Entry<ApplicationId, List<Container>> entry : containers.entrySet()) {
+ this.containers.put(convertToProtoFormat(entry.getKey()), entry.getValue());
+ }
+ }
+
+ private void addContainersToProto() {
+ maybeInitBuilder();
+ builder.clearContainers();
+ viaProto = false;
+ Iterable<ApplicationIdContainerListMapProto> iterable = new Iterable<ApplicationIdContainerListMapProto>() {
+
+ @Override
+ public Iterator<ApplicationIdContainerListMapProto> iterator() {
+ return new Iterator<ApplicationIdContainerListMapProto>() {
+
+ Iterator<ApplicationIdProto> keyIter = containers.keySet().iterator();
+ @Override
+ public boolean hasNext() {
+ return keyIter.hasNext();
+ }
+
+ @Override
+ public ApplicationIdContainerListMapProto next() {
+ ApplicationIdProto applicationIdProto = keyIter.next();
+ return ApplicationIdContainerListMapProto.newBuilder().setApplicationId(applicationIdProto).setValue(convertToProtoFormat(containers.get(applicationIdProto))).build();
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+ }
+
+ };
+ builder.addAllContainers(iterable);
+ }
+
+ @Override
+ public NodeHealthStatus getNodeHealthStatus() {
+ NodeStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (nodeHealthStatus != null) {
+ return nodeHealthStatus;
+ }
+ if (!p.hasNodeHealthStatus()) {
+ return null;
+ }
+ nodeHealthStatus = convertFromProtoFormat(p.getNodeHealthStatus());
+ return nodeHealthStatus;
+ }
+
+ @Override
+ public void setNodeHealthStatus(NodeHealthStatus healthStatus) {
+ maybeInitBuilder();
+ if (healthStatus == null) {
+ builder.clearNodeHealthStatus();
+ }
+ this.nodeHealthStatus = healthStatus;
+ }
+
+ private ContainerListProto convertToProtoFormat(List<Container> src) {
+ ContainerListProto.Builder ret = ContainerListProto.newBuilder();
+ for (Container c : src) {
+ ret.addContainer(((ContainerPBImpl)c).getProto());
+ }
+ return ret.build();
+ }
+
+ private List<Container> convertFromProtoFormat(ContainerListProto src) {
+ List<Container> ret = new ArrayList<Container>();
+ for (ContainerProto c : src.getContainerList()) {
+ ret.add(convertFromProtoFormat(c));
+ }
+ return ret;
+ }
+
+ private Container convertFromProtoFormat(ContainerProto src) {
+ return new ContainerPBImpl(src);
+ }
+
+ @Override
+ public void setContainers(ApplicationId applicationId, List<Container> containers) {
+ initContainers();
+ this.containers.put(convertToProtoFormat(applicationId), containers);
+ }
+
+ @Override
+ public void removeContainers(ApplicationId applicationId) {
+ initContainers();
+ this.containers.remove(convertToProtoFormat(applicationId));
+ }
+
+ @Override
+ public void clearContainers() {
+ initContainers();
+ this.containers.clear();
+ }
+
+ private NodeIdProto convertToProtoFormat(NodeId nodeId) {
+ return ((NodeIdPBImpl)nodeId).getProto();
+ }
+
+ private NodeId convertFromProtoFormat(NodeIdProto proto) {
+ return new NodeIdPBImpl(proto);
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId applicationId) {
+ return ((ApplicationIdPBImpl)applicationId).getProto();
+ }
+
+ private ApplicationId convertFromProtoFormat(ApplicationIdProto proto) {
+ return new ApplicationIdPBImpl(proto);
+ }
+
+ private NodeHealthStatusProto convertToProtoFormat(
+ NodeHealthStatus healthStatus) {
+ return ((NodeHealthStatusPBImpl) healthStatus).getProto();
+ }
+
+ private NodeHealthStatus convertFromProtoFormat(NodeHealthStatusProto proto) {
+ return new NodeHealthStatusPBImpl(proto);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/RegistrationResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/RegistrationResponsePBImpl.java
new file mode 100644
index 0000000..b562d4d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/RegistrationResponsePBImpl.java
@@ -0,0 +1,83 @@
+package org.apache.hadoop.yarn.server.api.records.impl.pb;
+
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.RegistrationResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.RegistrationResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
+
+
+
+public class RegistrationResponsePBImpl extends ProtoBase<RegistrationResponseProto> implements RegistrationResponse {
+ RegistrationResponseProto proto = RegistrationResponseProto.getDefaultInstance();
+ RegistrationResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ByteBuffer secretKey = null;
+
+ public RegistrationResponsePBImpl() {
+ builder = RegistrationResponseProto.newBuilder();
+ }
+
+ public RegistrationResponsePBImpl(RegistrationResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RegistrationResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.secretKey != null) {
+ builder.setSecretKey(convertToProtoFormat(this.secretKey));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = RegistrationResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public ByteBuffer getSecretKey() {
+ RegistrationResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.secretKey != null) {
+ return this.secretKey;
+ }
+ if (!p.hasSecretKey()) {
+ return null;
+ }
+ this.secretKey = convertFromProtoFormat(p.getSecretKey());
+ return this.secretKey;
+ }
+
+ @Override
+ public void setSecretKey(ByteBuffer secretKey) {
+ maybeInitBuilder();
+ if (secretKey == null)
+ builder.clearSecretKey();
+ this.secretKey = secretKey;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/ContainerTokenSecretManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/ContainerTokenSecretManager.java
new file mode 100644
index 0000000..7da6948
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/ContainerTokenSecretManager.java
@@ -0,0 +1,78 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.security;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.crypto.SecretKey;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+
+public class ContainerTokenSecretManager extends
+ SecretManager<ContainerTokenIdentifier> {
+
+ private static final Log LOG = LogFactory
+ .getLog(ContainerTokenSecretManager.class);
+
+ private Map<String, SecretKey> secretkeys =
+ new HashMap<String, SecretKey>();
+
+ // Used by the master (RM) to generate a secret key per NM host.
+ public SecretKey createAndGetSecretKey(CharSequence hostName) {
+ String hostNameStr = hostName.toString();
+ if (!this.secretkeys.containsKey(hostNameStr)) {
+ LOG.debug("Creating secretKey for NM " + hostNameStr);
+ this.secretkeys.put(hostNameStr,
+ createSecretKey("mySecretKey".getBytes()));
+ }
+ return this.secretkeys.get(hostNameStr);
+ }
+
+ // Used by the slave (NM) to store the secret key sent by the master.
+ public void setSecretKey(CharSequence hostName, byte[] secretKeyBytes) {
+ this.secretkeys.put(hostName.toString(), createSecretKey(secretKeyBytes));
+ }
+
+ @Override
+ public byte[] createPassword(ContainerTokenIdentifier identifier) {
+ LOG.debug("Creating password for " + identifier.getContainerID()
+ + " to be run on NM " + identifier.getNmHostName() + " "
+ + this.secretkeys.get(identifier.getNmHostName()));
+ return createPassword(identifier.getBytes(),
+ this.secretkeys.get(identifier.getNmHostName()));
+ }
+
+ @Override
+ public byte[] retrievePassword(ContainerTokenIdentifier identifier)
+ throws org.apache.hadoop.security.token.SecretManager.InvalidToken {
+ LOG.debug("Retrieving password for " + identifier.getContainerID()
+ + " to be run on NM " + identifier.getNmHostName());
+ return createPassword(identifier.getBytes(),
+ this.secretkeys.get(identifier.getNmHostName()));
+ }
+
+ @Override
+ public ContainerTokenIdentifier createIdentifier() {
+ return new ContainerTokenIdentifier();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto
new file mode 100644
index 0000000..922ce2c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto
@@ -0,0 +1,11 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "ResourceTracker";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_server_common_service_protos.proto";
+
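+// NM-to-RM protocol: a NodeManager registers once and then reports its
+// status through periodic heartbeats.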
+service ResourceTrackerService {
+ rpc registerNodeManager(RegisterNodeManagerRequestProto) returns (RegisterNodeManagerResponseProto);
+ rpc nodeHeartbeat(NodeHeartbeatRequestProto) returns (NodeHeartbeatResponseProto);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
new file mode 100644
index 0000000..3dde8c0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
@@ -0,0 +1,34 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "YarnServerCommonProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_protos.proto";
+
+message NodeStatusProto {
+ optional NodeIdProto node_id = 1;
+ optional int32 response_id = 2;
+ repeated ApplicationIdContainerListMapProto containers = 3;
+ optional NodeHealthStatusProto nodeHealthStatus = 4;
+}
+
+message RegistrationResponseProto {
+ optional bytes secret_key = 1;
+}
+
+message HeartbeatResponseProto {
+ optional int32 response_id = 1;
+ optional bool reboot = 2;
+ repeated ContainerIdProto containers_to_cleanup = 3;
+ repeated ApplicationIdProto applications_to_cleanup = 4;
+}
+
+message ContainerListProto {
+ repeated ContainerProto container = 1;
+}
+
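+// Emulates a map entry (application id -> container list); this version of
+// protocol buffers has no native map fields.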
+message ApplicationIdContainerListMapProto {
+ optional ApplicationIdProto application_id = 1;
+ optional ContainerListProto value = 2;
+}
+
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
new file mode 100644
index 0000000..1a6f9ff
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -0,0 +1,24 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "YarnServerCommonServiceProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_protos.proto";
+import "yarn_server_common_protos.proto";
+
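+// Request/response pairs for the two RPCs declared in ResourceTracker.proto.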
+message RegisterNodeManagerRequestProto {
+ optional NodeIdProto node_id = 1;
+ optional int32 http_port = 3;
+ optional ResourceProto resource = 4;
+}
+message RegisterNodeManagerResponseProto {
+ optional RegistrationResponseProto registration_response = 1;
+}
+
+message NodeHeartbeatRequestProto {
+ optional NodeStatusProto node_status = 1;
+}
+
+message NodeHeartbeatResponseProto {
+ optional HeartbeatResponseProto heartbeat_response = 1;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
new file mode 100644
index 0000000..2e92fa8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
@@ -0,0 +1 @@
+org.apache.hadoop.yarn.server.RMNMSecurityInfoClass
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml
new file mode 100644
index 0000000..fa9a74a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml
@@ -0,0 +1,204 @@
+<?xml version="1.0"?>
+<configuration>
+
+ <property>
+ <name>yarn.resourcemanager.principal</name>
+ <value>rm/sightbusy-lx@LOCALHOST</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.principal</name>
+ <value>nm/sightbusy-lx@LOCALHOST</value>
+ </property>
+
+
+<!-- All resourcemanager related configuration properties -->
+
+ <property>
+ <name>yarn.server.resourcemanager.address</name>
+ <value>0.0.0.0:8020</value>
+ </property>
+
+ <property>
+ <name>yarn.server.resourcemanager.resourcetracker.address</name>
+ <value>0.0.0.0:8025</value>
+ </property>
+
+ <property>
+ <name>yarn.server.resourcemanager.scheduler.address</name>
+ <value>0.0.0.0:8030</value>
+ </property>
+
+ <property>
+ <name>yarn.server.resourcemanager.admin.address</name>
+ <value>0.0.0.0:8141</value>
+ </property>
+
+ <property>
+ <name>yarn.server.resourcemanager.application.max.retries</name>
+ <value>1</value>
+ <description>The number of times an application will be retried in case
+ of AM failure.</description>
+ </property>
+ <property>
+ <name>yarn.server.resourcemanager.keytab</name>
+ <value>/etc/krb5.keytab</value>
+ </property>
+
+<!-- All nodemanager related configuration properties -->
+
+ <property>
+ <name>yarn.server.nodemanager.local-dir</name>
+ <value>/tmp/nm-local-dir</value>
+ </property>
+
+ <property>
+ <name>yarn.server.nodemanager.log.dir</name>
+ <value>/tmp/logs</value>
+ </property>
+
+ <property>
+ <name>yarn.apps.stagingDir</name>
+ <value>/tmp/hadoop-yarn/${user.name}/staging</value>
+ </property>
+
+ <property>
+ <name>yarn.apps.history.stagingDir</name>
+ <value>/tmp/hadoop-yarn/${user.name}/staging</value>
+ </property>
+
+ <property>
+ <name>yarn.server.nodemanager.keytab</name>
+ <value>/etc/krb5.keytab</value>
+ </property>
+
+ <property>
+ <name>yarn.server.nodemanager.container-executor.class</name>
+ <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+ <!--<value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>-->
+ </property>
+
+ <property>
+ <name>NM_HOSTS</name>
+ <value>0.0.0.0:45454</value>
+ </property>
+
+ <property>
+ <name>yarn.server.nodemanager.address</name>
+ <value>0.0.0.0:45454</value>
+ </property>
+
+ <!-- HealthChecker's properties -->
+ <property>
+ <name>yarn.server.nodemanager.healthchecker.script.path</name>
+ <value></value>
+ <description>Location of the node's health-check script on the local
+ file-system.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.server.nodemanager.healthchecker.interval</name>
+ <value>600000</value>
+ <description>Frequency of the health-check run by the NodeManager
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.server.nodemanager.healthchecker.script.timeout</name>
+ <value>1200000</value>
+ <description>Timeout for the health-check run by the NodeManager
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.server.nodemanager.healthchecker.script.args</name>
+ <value></value>
+ <description>Arguments to be passed to the health-check script run
+ by the NodeManager</description>
+ </property>
+
+ <!-- End of HealthChecker's properties -->
+
+ <!-- ContainerMonitor related properties -->
+
+ <property>
+ <name>yarn.server.nodemanager.containers-monitor.monitoring-interval</name>
+ <value>3000</value>
+ </property>
+
+ <property>
+ <name>yarn.server.nodemanager.containers-monitor.resourcecalculatorplugin</name>
+ <value></value>
+ </property>
+
+ <property>
+ <name>yarn.server.nodemanager.reserved-physical-memory.mb</name>
+ <value>-1</value>
+ </property>
+
+ <!-- End of ContainerMonitor related properties -->
+
+<!-- All MRAppMaster related configuration properties -->
+
+ <property>
+ <name>yarn.server.mapreduce-appmanager.attempt-listener.bindAddress</name>
+ <value>0.0.0.0</value>
+ </property>
+
+ <property>
+ <name>yarn.server.mapreduce-appmanager.client-service.bindAddress</name>
+ <value>0.0.0.0</value>
+ </property>
+
+
+ <property>
+ <name>mapreduce.job.jar</name>
+ <value></value>
+ <!--<value>~/Workspace/eclipse-workspace/yarn/yarn-mapreduce/yarn-mapreduce-app/target/yarn-mapreduce-app-1.0-SNAPSHOT.jar</value>-->
+ </property>
+
+ <property>
+ <name>mapreduce.job.hdfs-servers</name>
+ <value>${fs.default.name}</value>
+ </property>
+
+ <property>
+ <name>nodemanager.auxiluary.services</name>
+ <value></value>
+ <!-- <value>mapreduce.shuffle</value> -->
+ </property>
+
+<!--
+ <property>
+ <name>nodemanager.aux.service.mapreduce.shuffle.class</name>
+ <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+ </property>
+-->
+
+</configuration>
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/TestNodeHealthService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/TestNodeHealthService.java
new file mode 100644
index 0000000..3f37d62
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/TestNodeHealthService.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.TimerTask;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestNodeHealthService {
+
+ private static final Log LOG = LogFactory
+ .getLog(TestNodeHealthService.class);
+
+ protected static File testRootDir = new File("target",
+ TestNodeHealthService.class.getName() + "-localDir").getAbsoluteFile();
+
+ final static File nodeHealthConfigFile = new File(testRootDir,
+ "modified-mapred-site.xml");
+
+ private File nodeHealthscriptFile = new File(testRootDir,
+ "failingscript.sh");
+
+ @Before
+ public void setup() {
+ testRootDir.mkdirs();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (testRootDir.exists()) {
+ FileContext.getLocalFSFileContext().delete(
+ new Path(testRootDir.getAbsolutePath()), true);
+ }
+ }
+
+ private Configuration getConfForNodeHealthScript() {
+ Configuration conf = new Configuration();
+ conf.set(NodeHealthCheckerService.HEALTH_CHECK_SCRIPT_PROPERTY,
+ nodeHealthscriptFile.getAbsolutePath());
+ conf.setLong(NodeHealthCheckerService.HEALTH_CHECK_INTERVAL_PROPERTY, 500);
+ conf.setLong(
+ NodeHealthCheckerService.HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY, 1000);
+ return conf;
+ }
+
+ private void writeNodeHealthScriptFile(String scriptStr, boolean setExecutable)
+ throws IOException {
+ PrintWriter pw = new PrintWriter(new FileOutputStream(nodeHealthscriptFile));
+ pw.println(scriptStr);
+ pw.flush();
+ pw.close();
+ nodeHealthscriptFile.setExecutable(setExecutable);
+ }
+
+ @Test
+ public void testNodeHealthScriptShouldRun() throws IOException {
+ // Node health script should not start if there is no property called
+ // node health script path.
+ Assert.assertFalse("By default Health checker should not have started",
+ NodeHealthCheckerService.shouldRun(new Configuration()));
+ Configuration conf = getConfForNodeHealthScript();
+ // Node health script should not start if the node health script does not
+ // exist.
+ Assert.assertFalse("Node health script should not start", NodeHealthCheckerService
+ .shouldRun(conf));
+ // Create script path.
+ conf.writeXml(new FileOutputStream(nodeHealthConfigFile));
+ conf.addResource(nodeHealthConfigFile.getName());
+ writeNodeHealthScriptFile("", false);
+ // Node health script should not start if the node health script is not
+ // executable.
+ Assert.assertFalse("Node health script should start", NodeHealthCheckerService
+ .shouldRun(conf));
+ writeNodeHealthScriptFile("", true);
+ Assert.assertTrue("Node health script should start", NodeHealthCheckerService
+ .shouldRun(conf));
+ }
+
+ @Test
+ public void testNodeHealthScript() throws Exception {
+ RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
+ NodeHealthStatus healthStatus =
+ factory.newRecordInstance(NodeHealthStatus.class);
+ String errorScript = "echo ERROR\n echo \"Tracker not healthy\"";
+ String normalScript = "echo \"I am all fine\"";
+    String timeOutScript = "sleep 4\n echo \"I am fine\"";
+ Configuration conf = getConfForNodeHealthScript();
+ conf.writeXml(new FileOutputStream(nodeHealthConfigFile));
+ conf.addResource(nodeHealthConfigFile.getName());
+
+ NodeHealthCheckerService nodeHealthChecker = new NodeHealthCheckerService(
+ conf);
+ TimerTask timer = nodeHealthChecker.getTimer();
+ writeNodeHealthScriptFile(normalScript, true);
+ timer.run();
+
+ nodeHealthChecker.setHealthStatus(healthStatus);
+ LOG.info("Checking initial healthy condition");
+ // Check proper report conditions.
+ Assert.assertTrue("Node health status reported unhealthy", healthStatus
+ .getIsNodeHealthy());
+ Assert.assertTrue("Node health status reported unhealthy", healthStatus
+ .getHealthReport().isEmpty());
+
+ // write out error file.
+ // Healthy to unhealthy transition
+ writeNodeHealthScriptFile(errorScript, true);
+ // Run timer
+ timer.run();
+ // update health status
+ nodeHealthChecker.setHealthStatus(healthStatus);
+ LOG.info("Checking Healthy--->Unhealthy");
+ Assert.assertFalse("Node health status reported healthy", healthStatus
+ .getIsNodeHealthy());
+ Assert.assertFalse("Node health status reported healthy", healthStatus
+ .getHealthReport().isEmpty());
+
+ // Check unhealthy to healthy transitions.
+ writeNodeHealthScriptFile(normalScript, true);
+ timer.run();
+ nodeHealthChecker.setHealthStatus(healthStatus);
+ LOG.info("Checking UnHealthy--->healthy");
+ // Check proper report conditions.
+ Assert.assertTrue("Node health status reported unhealthy", healthStatus
+ .getIsNodeHealthy());
+ Assert.assertTrue("Node health status reported unhealthy", healthStatus
+ .getHealthReport().isEmpty());
+
+ // Healthy to timeout transition.
+ writeNodeHealthScriptFile(timeOutScript, true);
+ timer.run();
+ nodeHealthChecker.setHealthStatus(healthStatus);
+ LOG.info("Checking Healthy--->timeout");
+ Assert.assertFalse("Node health status reported healthy even after timeout",
+ healthStatus.getIsNodeHealthy());
+ Assert.assertEquals("Node time out message not propogated", healthStatus
+ .getHealthReport(),
+ NodeHealthCheckerService.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPCFactories.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPCFactories.java
new file mode 100644
index 0000000..15746bb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPCFactories.java
@@ -0,0 +1,100 @@
+package org.apache.hadoop.yarn;
+
+import java.net.InetSocketAddress;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.junit.Test;
+
+public class TestRPCFactories {
+
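+  /*
+   * Exercises the protobuf RPC factories end to end: RpcServerFactoryPBImpl
+   * wraps a plain ResourceTracker implementation in a protobuf-backed
+   * server, and RpcClientFactoryPBImpl hands back a ResourceTracker proxy
+   * that talks to it. Both factories locate the generated PB wrapper
+   * classes reflectively, so a YarnException from either one generally
+   * means that lookup failed.
+   */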
+ @Test
+ public void test() {
+ testPbServerFactory();
+
+ testPbClientFactory();
+ }
+
+ private void testPbServerFactory() {
+ InetSocketAddress addr = new InetSocketAddress(0);
+ Configuration conf = new Configuration();
+ ResourceTracker instance = new ResourceTrackerTestImpl();
+ Server server = null;
+ try {
+ server =
+ RpcServerFactoryPBImpl.get().getServer(
+ ResourceTracker.class, instance, addr, conf, null, 1);
+ server.start();
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to create server");
+    } finally {
+      if (server != null) {
+        server.stop();
+      }
+    }
+ }
+
+ private void testPbClientFactory() {
+ InetSocketAddress addr = new InetSocketAddress(0);
+ System.err.println(addr.getHostName() + addr.getPort());
+ Configuration conf = new Configuration();
+ ResourceTracker instance = new ResourceTrackerTestImpl();
+ Server server = null;
+ try {
+ server =
+ RpcServerFactoryPBImpl.get().getServer(
+ ResourceTracker.class, instance, addr, conf, null, 1);
+ server.start();
+ System.err.println(server.getListenerAddress());
+ System.err.println(NetUtils.getConnectAddress(server));
+
+ ResourceTracker client = null;
+ try {
+ client = (ResourceTracker) RpcClientFactoryPBImpl.get().getClient(ResourceTracker.class, 1, NetUtils.getConnectAddress(server), conf);
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to create client");
+ }
+
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to create server");
+    } finally {
+      if (server != null) {
+        server.stop();
+      }
+    }
+ }
+
+ public class ResourceTrackerTestImpl implements ResourceTracker {
+
+ @Override
+ public RegisterNodeManagerResponse registerNodeManager(
+ RegisterNodeManagerRequest request) throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
+ throws YarnRemoteException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRecordFactory.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRecordFactory.java
new file mode 100644
index 0000000..fb77999
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRecordFactory.java
@@ -0,0 +1,38 @@
+package org.apache.hadoop.yarn;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.impl.pb.HeartbeatResponsePBImpl;
+import org.junit.Test;
+
+public class TestRecordFactory {
+
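+  /*
+   * YARN protocol records are plain interfaces; the record factory is
+   * expected to return the protobuf-backed implementation (*PBImpl) for
+   * each requested record type, which is what both assertions verify.
+   */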
+ @Test
+ public void testPbRecordFactory() {
+ RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
+
+ try {
+ HeartbeatResponse response = pbRecordFactory.newRecordInstance(HeartbeatResponse.class);
+ Assert.assertEquals(HeartbeatResponsePBImpl.class, response.getClass());
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to crete record");
+ }
+
+ try {
+ NodeHeartbeatRequest request = pbRecordFactory.newRecordInstance(NodeHeartbeatRequest.class);
+ Assert.assertEquals(NodeHeartbeatRequestPBImpl.class, request.getClass());
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to crete record");
+ }
+
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/lib/TestZKClient.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/lib/TestZKClient.java
new file mode 100644
index 0000000..586533e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/lib/TestZKClient.java
@@ -0,0 +1,187 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.lib;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.lib.ZKClient;
+import org.apache.zookeeper.server.NIOServerCnxn;
+import org.apache.zookeeper.server.ZKDatabase;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.apache.zookeeper.server.persistence.FileTxnLog;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestZKClient {
+
+  public static final int CONNECTION_TIMEOUT = 30000;
+ static final File BASETEST =
+ new File(System.getProperty("build.test.dir", "target/zookeeper-build"));
+
+ protected String hostPort = "127.0.0.1:2000";
+ protected int maxCnxns = 0;
+ protected NIOServerCnxn.Factory factory = null;
+ protected File tmpDir = null;
+
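+  /**
+   * Send one of ZooKeeper's "four letter word" admin commands (e.g. "stat")
+   * over a raw socket and return the response; waitForServerUp below treats
+   * a response beginning with "Zookeeper version:" as proof the server is
+   * serving requests.
+   */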
+ public static String send4LetterWord(String host, int port, String cmd)
+ throws IOException
+ {
+ Socket sock = new Socket(host, port);
+ BufferedReader reader = null;
+ try {
+ OutputStream outstream = sock.getOutputStream();
+ outstream.write(cmd.getBytes());
+ outstream.flush();
+ // this replicates NC - close the output stream before reading
+ sock.shutdownOutput();
+
+ reader =
+ new BufferedReader(
+ new InputStreamReader(sock.getInputStream()));
+ StringBuilder sb = new StringBuilder();
+ String line;
+ while((line = reader.readLine()) != null) {
+ sb.append(line + "\n");
+ }
+ return sb.toString();
+ } finally {
+ sock.close();
+ if (reader != null) {
+ reader.close();
+ }
+ }
+ }
+
+ public static boolean waitForServerDown(String hp, long timeout) {
+ long start = System.currentTimeMillis();
+ while (true) {
+ try {
+ String host = hp.split(":")[0];
+ int port = Integer.parseInt(hp.split(":")[1]);
+ send4LetterWord(host, port, "stat");
+ } catch (IOException e) {
+ return true;
+ }
+
+ if (System.currentTimeMillis() > start + timeout) {
+ break;
+ }
+ try {
+ Thread.sleep(250);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ return false;
+ }
+
+
+ public static boolean waitForServerUp(String hp, long timeout) {
+ long start = System.currentTimeMillis();
+ while (true) {
+ try {
+ String host = hp.split(":")[0];
+ int port = Integer.parseInt(hp.split(":")[1]);
+ // if there are multiple hostports, just take the first one
+ String result = send4LetterWord(host, port, "stat");
+ if (result.startsWith("Zookeeper version:")) {
+ return true;
+ }
+ } catch (IOException e) {
+ }
+ if (System.currentTimeMillis() > start + timeout) {
+ break;
+ }
+ try {
+ Thread.sleep(250);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ return false;
+ }
+
+ public static File createTmpDir(File parentDir) throws IOException {
+ File tmpFile = File.createTempFile("test", ".junit", parentDir);
+ // don't delete tmpFile - this ensures we don't attempt to create
+ // a tmpDir with a duplicate name
+ File tmpDir = new File(tmpFile + ".dir");
+ Assert.assertFalse(tmpDir.exists());
+ Assert.assertTrue(tmpDir.mkdirs());
+ return tmpDir;
+ }
+
+ @Before
+ public void setUp() throws IOException, InterruptedException {
+ System.setProperty("zookeeper.preAllocSize", "100");
+ FileTxnLog.setPreallocSize(100 * 1024);
+ if (!BASETEST.exists()) {
+ BASETEST.mkdirs();
+ }
+ File dataDir = createTmpDir(BASETEST);
+ ZooKeeperServer zks = new ZooKeeperServer(dataDir, dataDir, 3000);
+ final int PORT = Integer.parseInt(hostPort.split(":")[1]);
+ if (factory == null) {
+ factory = new NIOServerCnxn.Factory(new InetSocketAddress(PORT),maxCnxns);
+ }
+ factory.startup(zks);
+ Assert.assertTrue("waiting for server up",
+ waitForServerUp("127.0.0.1:" + PORT,
+ CONNECTION_TIMEOUT));
+
+ }
+
+ @After
+ public void tearDown() throws IOException, InterruptedException {
+ if (factory != null) {
+ ZKDatabase zkDb = factory.getZooKeeperServer().getZKDatabase();
+ factory.shutdown();
+ try {
+ zkDb.close();
+      } catch (IOException ie) {
+        // ignore close failures during shutdown
+      }
+ final int PORT = Integer.parseInt(hostPort.split(":")[1]);
+
+ Assert.assertTrue("waiting for server down",
+ waitForServerDown("127.0.0.1:" + PORT,
+ CONNECTION_TIMEOUT));
+ }
+
+ }
+
+  @Test
+  public void testZKClient() throws Exception {
+ test("/some/test");
+ }
+
+  private void test(String path) throws Exception {
+    ZKClient client = new ZKClient(hostPort);
+    // register the test path with the server's host:port, then remove it
+    client.registerService(path, hostPort);
+    client.unregisterService(path);
+  }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
new file mode 100644
index 0000000..fa88ec4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -0,0 +1,174 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-yarn-server</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${yarn.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-nodemanager</artifactId>
+ <name>hadoop-yarn-server-nodemanager</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-common</artifactId>
+ </dependency>
+ </dependencies>
+
+ <profiles>
+ <profile>
+ <id>cbuild</id>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>make-maven-plugin</artifactId>
+ <version>1.0-beta-1</version>
+ <executions>
+ <execution>
+ <id>autoreconf</id>
+ <phase>package</phase>
+ <configuration>
+ <arguments>
+ <argument>-i</argument>
+ </arguments>
+ <workDir>src/main/c/container-executor</workDir>
+ </configuration>
+ <goals>
+ <goal>autoreconf</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>make</id>
+ <phase>package</phase>
+ <configuration>
+ <workDir>src/main/c/container-executor</workDir>
+ <configureEnvironment>
+ <property>
+ <name>CFLAGS</name>
+ <value>-DHADOOP_CONF_DIR=${container-executor.conf.dir}</value>
+ </property>
+ </configureEnvironment>
+ <sources>
+ <source>
+ <directory>src/main/c/container-executor</directory>
+ </source>
+ </sources>
+ <workDir>src/main/c/container-executor</workDir>
+ <destDir>target</destDir>
+ <prefix>${project.build.outputDirectory}</prefix>
+ </configuration>
+ <goals>
+ <!-- always clean, to ensure conf dir regenerated -->
+ <goal>make-clean</goal>
+ <goal>configure</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>install</id>
+ <phase>package</phase>
+ <configuration>
+ <destDir>/</destDir>
+ <workDir>src/main/c/container-executor</workDir>
+ </configuration>
+ <goals>
+ <goal>make-install</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ <activation>
+ <activeByDefault>true</activeByDefault>
+ </activation>
+ </profile>
+ </profiles>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <systemPropertyVariables>
+ <property>
+ <name>container-executor-path</name>
+ <value></value>
+ </property>
+ </systemPropertyVariables>
+ <excludes>
+ <exclude>**/TestFSDownload.java</exclude>
+ </excludes>
+ </configuration>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-protobuf-generated-sources-directory</id>
+ <phase>initialize</phase>
+ <configuration>
+ <target>
+ <mkdir dir="target/generated-sources/proto" />
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
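+      <!--
+        Protobuf codegen: protoc compiles the two .proto files into
+        target/generated-sources/proto (created by the antrun execution
+        above), and build-helper below registers that directory as an extra
+        source root so the generated classes compile with the module.
+      -->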
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>generate-sources</id>
+ <phase>generate-sources</phase>
+ <configuration>
+ <executable>protoc</executable>
+ <arguments>
+ <argument>-I../../hadoop-yarn-api/src/main/proto/</argument>
+ <argument>-Isrc/main/proto/</argument>
+ <argument>--java_out=target/generated-sources/proto</argument>
+ <argument>src/main/proto/yarn_server_nodemanager_service_protos.proto</argument>
+ <argument>src/main/proto/LocalizationProtocol.proto</argument>
+ </arguments>
+ </configuration>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>target/generated-sources/proto</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/avro/LocalizationProtocol.genavro b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/avro/LocalizationProtocol.genavro
new file mode 100644
index 0000000..01e0c07
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/avro/LocalizationProtocol.genavro
@@ -0,0 +1,11 @@
+@namespace("org.apache.hadoop.yarn")
+protocol LocalizationProtocol {
+
+ import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro";
+
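+  // Callbacks reporting the outcome of localizing a resource for a user:
+  // success carries the URL the resource was localized to, failure carries
+  // the exception that prevented localization.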
+ void successfulLocalization(string user, LocalResource resource, URL path)
+ throws YarnRemoteException;
+
+  void failedLocalization(string user, LocalResource resource, YarnRemoteException cause)
+ throws YarnRemoteException;
+}
diff --git a/mapreduce/src/c++/pipes/.autom4te.cfg b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/.autom4te.cfg
similarity index 100%
copy from mapreduce/src/c++/pipes/.autom4te.cfg
copy to hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/.autom4te.cfg
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/.deps/container-executor.Po b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/.deps/container-executor.Po
new file mode 100644
index 0000000..9ce06a8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/.deps/container-executor.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/.gitignore b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/.gitignore
new file mode 100644
index 0000000..6f5c2a6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/.gitignore
@@ -0,0 +1,18 @@
+Makefile
+Makefile.in
+aclocal.m4
+compile
+config.log
+config.status
+configure
+depcomp
+impl/.deps/
+impl/.dirstamp
+impl/configuration.o
+impl/main.o
+impl/container-executor.o
+install-sh
+libtool
+missing
+container-executor
+test/.deps/
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/Makefile.am b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/Makefile.am
new file mode 100644
index 0000000..cd32869
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/Makefile.am
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+AM_CFLAGS=-I$(srcdir)/impl -Wall -g -Werror
+
+# Define the programs that need to be built
+bin_PROGRAMS = container-executor
+#check_PROGRAMS = test-task-controller
+
+#TESTS = test-task-controller
+
+# Define the sources for the common files
+common_SOURCES = impl/configuration.c impl/container-executor.c
+
+# Define the sources for the real executable
+container_executor_SOURCES = $(common_SOURCES) impl/main.c
+
+# Define the sources for the test executable
+#test_task_controller_SOURCES = $(common_SOURCES) test/test-task-controller.c
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/configure.ac b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/configure.ac
new file mode 100644
index 0000000..e4bd656
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/configure.ac
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+AC_PREREQ(2.59)
+AC_INIT(linux-container-executor, 1.0.0, yarn-dev@yahoo-inc.com)
+AC_GNU_SOURCE
+#AC_SYS_LARGEFILE
+
+AM_INIT_AUTOMAKE([subdir-objects foreign no-dist])
+
+AC_CONFIG_SRCDIR([impl/container-executor.c])
+AC_CONFIG_FILES([Makefile])
+
+AC_PREFIX_DEFAULT(`pwd`/../install)
+
+CHECK_INSTALL_CFLAG
+HADOOP_UTILS_SETUP
+
+# Checks for programs.
+AC_PROG_CC
+AM_PROG_CC_C_O
+
+# Checks for libraries.
+
+# Checks for header files.
+AC_LANG(C)
+AC_CHECK_HEADERS([unistd.h])
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_HEADER_STDBOOL
+AC_C_CONST
+AC_TYPE_OFF_T
+AC_TYPE_SIZE_T
+AC_FUNC_STRERROR_R
+
+# Checks for library functions.
+AC_CHECK_FUNCS([mkdir uname])
+AC_OUTPUT
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/configuration.c b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/configuration.c
new file mode 100644
index 0000000..f1f53bf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/configuration.c
@@ -0,0 +1,296 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// ensure we get the posix version of dirname by including this first
+#include <libgen.h>
+
+#include "configuration.h"
+#include "container-executor.h"
+
+#include <errno.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#define MAX_SIZE 10
+
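+//All parsed configuration lives in the single global 'config' below: a
+//flat array of key/value entries, allocated MAX_SIZE slots at a time by
+//read_config().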
+struct confentry {
+ const char *key;
+ const char *value;
+};
+
+struct configuration {
+ int size;
+ struct confentry **confdetails;
+};
+
+struct configuration config={.size=0, .confdetails=NULL};
+
+//clean up method for freeing configuration
+void free_configurations() {
+ int i = 0;
+ for (i = 0; i < config.size; i++) {
+ if (config.confdetails[i]->key != NULL) {
+ free((void *)config.confdetails[i]->key);
+ }
+ if (config.confdetails[i]->value != NULL) {
+ free((void *)config.confdetails[i]->value);
+ }
+ free(config.confdetails[i]);
+ }
+ if (config.size > 0) {
+ free(config.confdetails);
+ }
+ config.size = 0;
+}
+
+/**
+ * Is the file/directory only writable by root.
+ * Returns 1 if true
+ */
+static int is_only_root_writable(const char *file) {
+ struct stat file_stat;
+ if (stat(file, &file_stat) != 0) {
+ fprintf(LOGFILE, "Can't stat file %s - %s\n", file, strerror(errno));
+ return 0;
+ }
+ if (file_stat.st_uid != 0) {
+ fprintf(LOGFILE, "File %s must be owned by root, but is owned by %d\n",
+ file, file_stat.st_uid);
+ return 0;
+ }
+ if ((file_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0) {
+ fprintf(LOGFILE,
+ "File %s must not be world or group writable, but is %03o\n",
+ file, file_stat.st_mode & (~S_IFMT));
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Ensure that the configuration file and all of the containing directories
+ * are only writable by root. Otherwise, an attacker can change the
+ * configuration and potentially cause damage.
+ * returns 0 if permissions are ok
+ */
+int check_configuration_permissions(const char* file_name) {
+ // copy the input so that we can modify it with dirname
+ char* dir = strdup(file_name);
+ char* buffer = dir;
+ do {
+ if (!is_only_root_writable(dir)) {
+ free(buffer);
+ return -1;
+ }
+ dir = dirname(dir);
+ } while (strcmp(dir, "/") != 0);
+ free(buffer);
+ return 0;
+}
+
+//function used to load the configurations present in the secure config
+void read_config(const char* file_name) {
+ fprintf(LOGFILE, "Reading task controller config from %s\n" , file_name);
+ FILE *conf_file;
+ char *line;
+ char *equaltok;
+ char *temp_equaltok;
+ size_t linesize = 1000;
+  ssize_t size_read = 0;
+
+ if (file_name == NULL) {
+ fprintf(LOGFILE, "Null configuration filename passed in\n");
+ exit(INVALID_CONFIG_FILE);
+ }
+
+ #ifdef DEBUG
+ fprintf(LOGFILE, "read_config :Conf file name is : %s \n", file_name);
+ #endif
+
+ //allocate space for ten configuration items.
+ config.confdetails = (struct confentry **) malloc(sizeof(struct confentry *)
+ * MAX_SIZE);
+ config.size = 0;
+ conf_file = fopen(file_name, "r");
+ if (conf_file == NULL) {
+ fprintf(LOGFILE, "Invalid conf file provided : %s \n", file_name);
+ exit(INVALID_CONFIG_FILE);
+ }
+ while(!feof(conf_file)) {
+ line = (char *) malloc(linesize);
+ if(line == NULL) {
+ fprintf(LOGFILE, "malloc failed while reading configuration file.\n");
+ exit(OUT_OF_MEMORY);
+ }
+ size_read = getline(&line,&linesize,conf_file);
+    //feof returns true only after a read past EOF, so the last line of a
+    //file without a trailing newline can still reach this point; when
+    //getline returns -1 we must distinguish end-of-file from a real error.
+ if (size_read == -1) {
+ if(!feof(conf_file)){
+ fprintf(LOGFILE, "getline returned error.\n");
+ exit(INVALID_CONFIG_FILE);
+ }else {
+ free(line);
+ break;
+ }
+ }
+    //trim the trailing newline, if present (the last line may lack one)
+    if (strlen(line) > 0 && line[strlen(line) - 1] == '\n') {
+      line[strlen(line) - 1] = '\0';
+    }
+ //comment line
+ if(line[0] == '#') {
+ free(line);
+ continue;
+ }
+ //tokenize first to get key and list of values.
+ //if no equals is found ignore this line, can be an empty line also
+ equaltok = strtok_r(line, "=", &temp_equaltok);
+ if(equaltok == NULL) {
+ free(line);
+ continue;
+ }
+ config.confdetails[config.size] = (struct confentry *) malloc(
+ sizeof(struct confentry));
+ if(config.confdetails[config.size] == NULL) {
+ fprintf(LOGFILE,
+ "Failed allocating memory for single configuration item\n");
+ goto cleanup;
+ }
+
+ #ifdef DEBUG
+ fprintf(LOGFILE, "read_config : Adding conf key : %s \n", equaltok);
+ #endif
+
+ memset(config.confdetails[config.size], 0, sizeof(struct confentry));
+ config.confdetails[config.size]->key = (char *) malloc(
+ sizeof(char) * (strlen(equaltok)+1));
+ strcpy((char *)config.confdetails[config.size]->key, equaltok);
+ equaltok = strtok_r(NULL, "=", &temp_equaltok);
+ if (equaltok == NULL) {
+ fprintf(LOGFILE, "configuration tokenization failed \n");
+ goto cleanup;
+ }
+ //means value is commented so don't store the key
+ if(equaltok[0] == '#') {
+ free(line);
+ free((void *)config.confdetails[config.size]->key);
+ free(config.confdetails[config.size]);
+ continue;
+ }
+
+ #ifdef DEBUG
+ fprintf(LOGFILE, "read_config : Adding conf value : %s \n", equaltok);
+ #endif
+
+ config.confdetails[config.size]->value = (char *) malloc(
+ sizeof(char) * (strlen(equaltok)+1));
+ strcpy((char *)config.confdetails[config.size]->value, equaltok);
+ if((config.size + 1) % MAX_SIZE == 0) {
+      config.confdetails = (struct confentry **) realloc(config.confdetails,
+          sizeof(struct confentry *) * (MAX_SIZE + config.size));
+ if (config.confdetails == NULL) {
+ fprintf(LOGFILE,
+ "Failed re-allocating memory for configuration items\n");
+ goto cleanup;
+ }
+ }
+    config.size++;
+ free(line);
+ }
+
+ //close the file
+ fclose(conf_file);
+
+ if (config.size == 0) {
+ fprintf(LOGFILE, "Invalid configuration provided in %s\n", file_name);
+ exit(INVALID_CONFIG_FILE);
+ }
+  return;
+
+  //error path: free everything allocated so far
+  cleanup:
+ if (line != NULL) {
+ free(line);
+ }
+ fclose(conf_file);
+ free_configurations();
+ return;
+}
+
+/*
+ * Function used to get a configuration value.
+ * read_config() must have been called first to populate the configuration
+ * array; the returned string is a copy that the caller must free.
+ */
+char * get_value(const char* key) {
+ int count;
+ for (count = 0; count < config.size; count++) {
+ if (strcmp(config.confdetails[count]->key, key) == 0) {
+ return strdup(config.confdetails[count]->value);
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Function to return an array of values for a key.
+ * Value delimiter is assumed to be a comma.
+ */
+char ** get_values(const char * key) {
+ char ** toPass = NULL;
+ char *value = get_value(key);
+ char *tempTok = NULL;
+ char *tempstr = NULL;
+ int size = 0;
+ int toPassSize = MAX_SIZE;
+
+  //start with an array of MAX_SIZE entries, growing as needed
+ if(value != NULL) {
+ toPass = (char **) malloc(sizeof(char *) * toPassSize);
+ tempTok = strtok_r((char *)value, ",", &tempstr);
+ while (tempTok != NULL) {
+ toPass[size++] = tempTok;
+ if(size == toPassSize) {
+ toPassSize += MAX_SIZE;
+        toPass = (char **) realloc(toPass,
+                                   sizeof(char *) * toPassSize);
+ }
+ tempTok = strtok_r(NULL, ",", &tempstr);
+ }
+ }
+ if (size > 0) {
+ toPass[size] = NULL;
+ }
+ return toPass;
+}
+
+// free the array returned by get_values, along with the underlying
+// strdup'd value buffer that its entries point into
+void free_values(char** values) {
+  if (values != NULL) {
+    if (*values != NULL) {
+      free(*values);
+    }
+    free(values);
+  }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/configuration.h b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/configuration.h
new file mode 100644
index 0000000..16ca23d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/configuration.h
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Ensure that the configuration file and all of the containing directories
+ * are only writable by root. Otherwise, an attacker can change the
+ * configuration and potentially cause damage.
+ * returns 0 if permissions are ok
+ */
+int check_configuration_permissions(const char* file_name);
+
+// read the given configuration file
+void read_config(const char* config_file);
+
+//look up a single configuration value; the caller must free the result
+char *get_value(const char* key);
+
+//function to return a NULL-terminated array of values for the key. Values
+//are comma-separated strings.
+char ** get_values(const char* key);
+
+// free the memory returned by get_values
+void free_values(char** values);
+
+//method to free allocated configuration
+void free_configurations();
+
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/container-executor.c b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/container-executor.c
new file mode 100644
index 0000000..6064716
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/container-executor.c
@@ -0,0 +1,1049 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "configuration.h"
+#include "container-executor.h"
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <fts.h>
+#include <errno.h>
+#include <grp.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+
+static const int DEFAULT_MIN_USERID = 1000;
+
+static const char* DEFAULT_BANNED_USERS[] = {"mapred", "hdfs", "bin", 0};
+
+//struct to store the user details
+struct passwd *user_detail = NULL;
+
+FILE* LOGFILE = NULL;
+
+static uid_t tt_uid = -1;
+static gid_t tt_gid = -1;
+
+void set_tasktracker_uid(uid_t user, gid_t group) {
+ tt_uid = user;
+ tt_gid = group;
+}
+
+/**
+ * get the executable filename.
+ */
+char* get_executable() {
+ char buffer[PATH_MAX];
+ snprintf(buffer, PATH_MAX, "/proc/%u/exe", getpid());
+ char *filename = malloc(PATH_MAX);
+ ssize_t len = readlink(buffer, filename, PATH_MAX);
+ if (len == -1) {
+ fprintf(stderr, "Can't get executable name from %s - %s\n", buffer,
+ strerror(errno));
+ exit(-1);
+ } else if (len >= PATH_MAX) {
+ fprintf(LOGFILE, "Executable name %.*s is longer than %d characters.\n",
+ PATH_MAX, filename, PATH_MAX);
+ exit(-1);
+ }
+ filename[len] = '\0';
+ return filename;
+}
+
+/**
+ * Check the permissions on the container-executor binary to make sure that
+ * security is enforceable. For this, we need the container-executor binary to
+ * * be user-owned by root
+ * * be group-owned by a configured special group.
+ * * others do not have any permissions
+ * * be setuid/setgid
+ */
+int check_taskcontroller_permissions(char *executable_file) {
+
+ errno = 0;
+ char * resolved_path = realpath(executable_file, NULL);
+ if (resolved_path == NULL) {
+ fprintf(LOGFILE,
+ "Error resolving the canonical name for the executable : %s!",
+ strerror(errno));
+ return -1;
+ }
+
+ struct stat filestat;
+ errno = 0;
+ if (stat(resolved_path, &filestat) != 0) {
+ fprintf(LOGFILE,
+ "Could not stat the executable : %s!.\n", strerror(errno));
+ return -1;
+ }
+
+ uid_t binary_euid = filestat.st_uid; // Binary's user owner
+ gid_t binary_gid = filestat.st_gid; // Binary's group owner
+
+ // Effective uid should be root
+ if (binary_euid != 0) {
+ fprintf(LOGFILE,
+ "The container-executor binary should be user-owned by root.\n");
+ return -1;
+ }
+
+ if (binary_gid != getgid()) {
+ fprintf(LOGFILE, "The configured tasktracker group %d is different from"
+ " the group of the executable %d\n", getgid(), binary_gid);
+ return -1;
+ }
+
+ // check others do not have read/write/execute permissions
+ if ((filestat.st_mode & S_IROTH) == S_IROTH || (filestat.st_mode & S_IWOTH)
+ == S_IWOTH || (filestat.st_mode & S_IXOTH) == S_IXOTH) {
+ fprintf(LOGFILE,
+ "The container-executor binary should not have read or write or"
+ " execute for others.\n");
+ return -1;
+ }
+
+ // Binary should be setuid/setgid executable
+ if ((filestat.st_mode & S_ISUID) == 0) {
+ fprintf(LOGFILE, "The container-executor binary should be set setuid.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Change the effective user id to limit damage.
+ */
+static int change_effective_user(uid_t user, gid_t group) {
+ if (geteuid() == user) {
+ return 0;
+ }
+  if (seteuid(0) != 0) {
+    fprintf(LOGFILE, "Failed to regain root effective user id - %s\n",
+            strerror(errno));
+    return -1;
+  }
+ if (setegid(group) != 0) {
+ fprintf(LOGFILE, "Failed to set effective group id %d - %s\n", group,
+ strerror(errno));
+ return -1;
+ }
+ if (seteuid(user) != 0) {
+ fprintf(LOGFILE, "Failed to set effective user id %d - %s\n", user,
+ strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
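+/*
+ * Note the contrast with change_effective_user above: seteuid swaps only
+ * the effective id and is reversible (the saved uid of this setuid-root
+ * binary stays root), whereas the setuid/setgid calls below drop the real
+ * and saved ids too and cannot be undone.
+ */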
+/**
+ * Change the real and effective user and group to abandon the super user
+ * privileges.
+ */
+int change_user(uid_t user, gid_t group) {
+ if (user == getuid() && user == geteuid() &&
+ group == getgid() && group == getegid()) {
+ return 0;
+ }
+
+ if (seteuid(0) != 0) {
+ fprintf(LOGFILE, "unable to reacquire root - %s\n", strerror(errno));
+ fprintf(LOGFILE, "Real: %d:%d; Effective: %d:%d\n",
+ getuid(), getgid(), geteuid(), getegid());
+ return SETUID_OPER_FAILED;
+ }
+ if (setgid(group) != 0) {
+ fprintf(LOGFILE, "unable to set group to %d - %s\n", group,
+ strerror(errno));
+ fprintf(LOGFILE, "Real: %d:%d; Effective: %d:%d\n",
+ getuid(), getgid(), geteuid(), getegid());
+ return SETUID_OPER_FAILED;
+ }
+ if (setuid(user) != 0) {
+ fprintf(LOGFILE, "unable to set user to %d - %s\n", user, strerror(errno));
+ fprintf(LOGFILE, "Real: %d:%d; Effective: %d:%d\n",
+ getuid(), getgid(), geteuid(), getegid());
+ return SETUID_OPER_FAILED;
+ }
+
+ return 0;
+}
+
+/**
+ * Utility function to concatenate argB to argA using the concat_pattern.
+ */
+char *concatenate(char *concat_pattern, char *return_path_name,
+ int numArgs, ...) {
+ va_list ap;
+ va_start(ap, numArgs);
+ int strlen_args = 0;
+ char *arg = NULL;
+ int j;
+ for (j = 0; j < numArgs; j++) {
+ arg = va_arg(ap, char*);
+ if (arg == NULL) {
+ fprintf(LOGFILE, "One of the arguments passed for %s in null.\n",
+ return_path_name);
+ return NULL;
+ }
+ strlen_args += strlen(arg);
+ }
+ va_end(ap);
+
+ char *return_path = NULL;
+ int str_len = strlen(concat_pattern) + strlen_args + 1;
+
+ return_path = (char *) malloc(str_len);
+ if (return_path == NULL) {
+ fprintf(LOGFILE, "Unable to allocate memory for %s.\n", return_path_name);
+ return NULL;
+ }
+ va_start(ap, numArgs);
+ vsnprintf(return_path, str_len, concat_pattern, ap);
+ va_end(ap);
+ return return_path;
+}
+
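+/*
+ * The helpers below expand printf-style path patterns (TT_JOB_DIR_PATTERN
+ * and friends, defined in container-executor.h) via concatenate(). As a
+ * purely illustrative example, a pattern of the form
+ * "%s/taskTracker/%s/jobcache/%s" would expand to
+ * <tt_root>/taskTracker/<user>/jobcache/<jobid>; the actual layout is
+ * whatever the header defines.
+ */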
+/**
+ * Get the job-directory path from tt_root, user name and job-id
+ */
+char *get_job_directory(const char * tt_root, const char *user,
+ const char *jobid) {
+ return concatenate(TT_JOB_DIR_PATTERN, "job_dir_path", 3, tt_root, user,
+ jobid);
+}
+
+/**
+ * Get the user directory of a particular user
+ */
+char *get_user_directory(const char *tt_root, const char *user) {
+ return concatenate(USER_DIR_PATTERN, "user_dir_path", 2, tt_root, user);
+}
+
+char *get_job_work_directory(const char *job_dir) {
+ return concatenate("%s/work", "job work", 1, job_dir);
+}
+
+/**
+ * Get the attempt directory for the given attempt_id
+ */
+char *get_attempt_work_directory(const char *tt_root, const char *user,
+ const char *job_id, const char *attempt_id) {
+ return concatenate(ATTEMPT_DIR_PATTERN, "attempt_dir_path", 4,
+ tt_root, user, job_id, attempt_id);
+}
+
+char *get_task_launcher_file(const char* work_dir) {
+ return concatenate("%s/%s", "task launcher", 2, work_dir, TASK_SCRIPT);
+}
+
+char *get_task_credentials_file(const char* work_dir) {
+ return concatenate("%s/%s", "task crednetials", 2, work_dir,
+ CREDENTIALS_FILENAME);
+}
+
+/**
+ * Get the job log directory under the given log_root
+ */
+char* get_job_log_directory(const char *log_root, const char* jobid) {
+ return concatenate("%s/%s", "job log dir", 2, log_root,
+ jobid);
+}
+
+/*
+ * Get a user subdirectory.
+ */
+char *get_user_subdirectory(const char *tt_root,
+ const char *user,
+ const char *subdir) {
+ char * user_dir = get_user_directory(tt_root, user);
+ char * result = concatenate("%s/%s", "user subdir", 2,
+ user_dir, subdir);
+ free(user_dir);
+ return result;
+}
+
+/**
+ * Ensure that the given path and all of the parent directories are created
+ * with the desired permissions. The path is walked one component at a time
+ * using mkdirat/openat, so a prefix that has already been traversed cannot
+ * be swapped out underneath us.
+ */
+int mkdirs(const char* path, mode_t perm) {
+ char *buffer = strdup(path);
+ char *token;
+ int cwd = open("/", O_RDONLY);
+ if (cwd == -1) {
+ fprintf(LOGFILE, "Can't open / in %s - %s\n", path, strerror(errno));
+ free(buffer);
+ return -1;
+ }
+ for(token = strtok(buffer, "/"); token != NULL; token = strtok(NULL, "/")) {
+ if (mkdirat(cwd, token, perm) != 0) {
+ if (errno != EEXIST) {
+ fprintf(LOGFILE, "Can't create directory %s in %s - %s\n",
+ token, path, strerror(errno));
+ close(cwd);
+ free(buffer);
+ return -1;
+ }
+ }
+ int new_dir = openat(cwd, token, O_RDONLY);
+ close(cwd);
+ cwd = new_dir;
+ if (cwd == -1) {
+ fprintf(LOGFILE, "Can't open %s in %s - %s\n", token, path,
+ strerror(errno));
+ free(buffer);
+ return -1;
+ }
+ }
+ free(buffer);
+ close(cwd);
+ return 0;
+}
+
+/**
+ * Function to prepare the attempt directories for the task JVM.
+ * It creates the task work and log directories.
+ */
+static int create_attempt_directories(const char* user, const char *job_id,
+ const char *task_id) {
+ // create dirs as 0750
+ const mode_t perms = S_IRWXU | S_IRGRP | S_IXGRP;
+ if (job_id == NULL || task_id == NULL || user == NULL) {
+ fprintf(LOGFILE,
+ "Either task_id is null or the user passed is null.\n");
+ return -1;
+ }
+
+ int result = -1;
+
+ char **local_dir = get_values(TT_SYS_DIR_KEY);
+
+ if (local_dir == NULL) {
+ fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY);
+ return -1;
+ }
+
+ char **local_dir_ptr;
+ for(local_dir_ptr = local_dir; *local_dir_ptr != NULL; ++local_dir_ptr) {
+ char *task_dir = get_attempt_work_directory(*local_dir_ptr, user, job_id,
+ task_id);
+ if (task_dir == NULL) {
+ free_values(local_dir);
+ return -1;
+ }
+ if (mkdirs(task_dir, perms) != 0) {
+ // continue on to create other task directories
+ free(task_dir);
+ } else {
+ result = 0;
+ free(task_dir);
+ }
+ }
+ free_values(local_dir);
+ if (result != 0) {
+ return result;
+ }
+
+ result = -1;
+ // also make the directory for the task logs
+ char *job_task_name = malloc(strlen(job_id) + strlen(task_id) + 2);
+ if (job_task_name == NULL) {
+ fprintf(LOGFILE, "Malloc of job task name failed\n");
+ result = -1;
+ } else {
+ sprintf(job_task_name, "%s/%s", job_id, task_id);
+
+ char **log_dir = get_values(TT_LOG_DIR_KEY);
+    if (log_dir == NULL) {
+      fprintf(LOGFILE, "%s is not configured.\n", TT_LOG_DIR_KEY);
+      free(job_task_name);
+      return -1;
+    }
+
+ char **log_dir_ptr;
+ for(log_dir_ptr = log_dir; *log_dir_ptr != NULL; ++log_dir_ptr) {
+ char *job_log_dir = get_job_log_directory(*log_dir_ptr, job_task_name);
+ if (job_log_dir == NULL) {
+ free_values(log_dir);
+ return -1;
+ } else if (mkdirs(job_log_dir, perms) != 0) {
+ free(job_log_dir);
+ } else {
+ result = 0;
+ free(job_log_dir);
+ }
+ }
+ free(job_task_name);
+ free_values(log_dir);
+ }
+ return result;
+}
+
+/**
+ * Load the user information for a given user name.
+ */
+static struct passwd* get_user_info(const char* user) {
+ int string_size = sysconf(_SC_GETPW_R_SIZE_MAX);
+ void* buffer = malloc(string_size + sizeof(struct passwd));
+ struct passwd *result = NULL;
+ if (getpwnam_r(user, buffer, buffer + sizeof(struct passwd), string_size,
+ &result) != 0) {
+ free(buffer);
+ fprintf(LOGFILE, "Can't get user information %s - %s\n", user,
+ strerror(errno));
+ return NULL;
+ }
+  if (result == NULL) {
+    //user not found: free the lookup buffer that would otherwise leak
+    free(buffer);
+  }
+  return result;
+}
+
+/**
+ * Is the user a real user account?
+ * Checks:
+ * 1. Not root
+ * 2. UID is above the minimum configured.
+ * 3. Not in banned user list
+ * Returns NULL on failure
+ */
+struct passwd* check_user(const char *user) {
+ if (strcmp(user, "root") == 0) {
+ fprintf(LOGFILE, "Running as root is not allowed\n");
+ return NULL;
+ }
+ char *min_uid_str = get_value(MIN_USERID_KEY);
+ int min_uid = DEFAULT_MIN_USERID;
+ if (min_uid_str != NULL) {
+ char *end_ptr = NULL;
+ min_uid = strtol(min_uid_str, &end_ptr, 10);
+ if (min_uid_str == end_ptr || *end_ptr != '\0') {
+ fprintf(LOGFILE, "Illegal value of %s for %s in configuration\n",
+ min_uid_str, MIN_USERID_KEY);
+ free(min_uid_str);
+ return NULL;
+ }
+ free(min_uid_str);
+ }
+ struct passwd *user_info = get_user_info(user);
+ if (NULL == user_info) {
+ fprintf(LOGFILE, "User %s not found\n", user);
+ return NULL;
+ }
+ if (user_info->pw_uid < min_uid) {
+ fprintf(LOGFILE, "Requested user %s has id %d, which is below the "
+ "minimum allowed %d\n", user, user_info->pw_uid, min_uid);
+ free(user_info);
+ return NULL;
+ }
+ char **banned_users = get_values(BANNED_USERS_KEY);
+ char **banned_user = (banned_users == NULL) ?
+ (char**) DEFAULT_BANNED_USERS : banned_users;
+ for(; *banned_user; ++banned_user) {
+ if (strcmp(*banned_user, user) == 0) {
+ free(user_info);
+ fprintf(LOGFILE, "Requested user %s is banned\n", user);
+ return NULL;
+ }
+ }
+ if (banned_users != NULL) {
+ free_values(banned_users);
+ }
+ return user_info;
+}
+
+/**
+ * Function used to populate the global user_detail structure.
+ */
+int set_user(const char *user) {
+ // free any old user
+ if (user_detail != NULL) {
+ free(user_detail);
+ user_detail = NULL;
+ }
+ user_detail = check_user(user);
+ if (user_detail == NULL) {
+ return -1;
+ }
+ return change_effective_user(user_detail->pw_uid, user_detail->pw_gid);
+}
+
+/**
+ * Change the ownership of the given file or directory to the new user.
+ */
+static int change_owner(const char* path, uid_t user, gid_t group) {
+ if (geteuid() == user && getegid() == group) {
+ return 0;
+ } else {
+ uid_t old_user = geteuid();
+ gid_t old_group = getegid();
+ if (change_effective_user(0, group) != 0) {
+ return -1;
+ }
+    if (chown(path, user, group) != 0) {
+      fprintf(LOGFILE, "Can't chown %s to %d:%d - %s\n", path, user, group,
+              strerror(errno));
+      //restore the previous effective user before reporting the failure
+      change_effective_user(old_user, old_group);
+      return -1;
+    }
+ return change_effective_user(old_user, old_group);
+ }
+}
+
+/**
+ * Create a top level directory for the user.
+ * It assumes that the parent directory is *not* writable by the user.
+ * It creates directories with 02750 permissions owned by the user
+ * and with the group set to the task tracker group.
+ * return non-0 on failure
+ */
+int create_directory_for_user(const char* path) {
+ // set 2750 permissions and group sticky bit
+ mode_t permissions = S_IRWXU | S_IRGRP | S_IXGRP | S_ISGID;
+ uid_t user = geteuid();
+ gid_t group = getegid();
+ int ret = 0;
+ ret = change_effective_user(0, tt_gid);
+ if (ret == 0) {
+ if (0 == mkdir(path, permissions) || EEXIST == errno) {
+ // need to reassert the group sticky bit
+ if (chmod(path, permissions) != 0) {
+ fprintf(LOGFILE, "Can't chmod %s to add the sticky bit - %s\n",
+ path, strerror(errno));
+ ret = -1;
+ } else if (change_owner(path, user, tt_gid) != 0) {
+ fprintf(LOGFILE, "Failed to chown %s to %d:%d: %s\n", path, user, tt_gid,
+ strerror(errno));
+ ret = -1;
+ }
+ } else {
+ fprintf(LOGFILE, "Failed to create directory %s - %s\n", path,
+ strerror(errno));
+ ret = -1;
+ }
+ }
+ if (change_effective_user(user, group) != 0) {
+ ret = -1;
+ }
+ return ret;
+}
+
+/**
+ * Open a file as the tasktracker and return a file descriptor for it.
+ * Returns -1 on error
+ */
+static int open_file_as_task_tracker(const char* filename) {
+ uid_t user = geteuid();
+ gid_t group = getegid();
+ if (change_effective_user(tt_uid, tt_gid) != 0) {
+ return -1;
+ }
+ int result = open(filename, O_RDONLY);
+ if (result == -1) {
+ fprintf(LOGFILE, "Can't open file %s as task tracker - %s\n", filename,
+ strerror(errno));
+ }
+ if (change_effective_user(user, group)) {
+ result = -1;
+ }
+ return result;
+}
+
+/**
+ * Copy a file from a fd to a given filename.
+ * The new file must not exist and it is created with permissions perm.
+ * The input stream is closed on success.
+ * Return 0 if everything is ok.
+ */
+static int copy_file(int input, const char* in_filename,
+ const char* out_filename, mode_t perm) {
+ const int buffer_size = 128*1024;
+ char buffer[buffer_size];
+ int out_fd = open(out_filename, O_WRONLY|O_CREAT|O_EXCL|O_NOFOLLOW, perm);
+ if (out_fd == -1) {
+ fprintf(LOGFILE, "Can't open %s for output - %s\n", out_filename,
+ strerror(errno));
+ return -1;
+ }
+ ssize_t len = read(input, buffer, buffer_size);
+ while (len > 0) {
+ ssize_t pos = 0;
+ while (pos < len) {
+ ssize_t write_result = write(out_fd, buffer + pos, len - pos);
+ if (write_result <= 0) {
+ fprintf(LOGFILE, "Error writing to %s - %s\n", out_filename,
+ strerror(errno));
+ close(out_fd);
+ return -1;
+ }
+ pos += write_result;
+ }
+ len = read(input, buffer, buffer_size);
+ }
+ if (len < 0) {
+ fprintf(LOGFILE, "Failed to read file %s - %s\n", in_filename,
+ strerror(errno));
+ close(out_fd);
+ return -1;
+ }
+ if (close(out_fd) != 0) {
+ fprintf(LOGFILE, "Failed to close file %s - %s\n", out_filename,
+ strerror(errno));
+ return -1;
+ }
+ close(input);
+ return 0;
+}
+
+/**
+ * Function to initialize the user directories of a user.
+ */
+int initialize_user(const char *user) {
+ char **local_dir = get_values(TT_SYS_DIR_KEY);
+ if (local_dir == NULL) {
+ fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY);
+ return INVALID_TT_ROOT;
+ }
+
+ char *user_dir;
+ char **local_dir_ptr = local_dir;
+ int failed = 0;
+ for(local_dir_ptr = local_dir; *local_dir_ptr != 0; ++local_dir_ptr) {
+ user_dir = get_user_directory(*local_dir_ptr, user);
+ if (user_dir == NULL) {
+ fprintf(LOGFILE, "Couldn't get userdir directory for %s.\n", user);
+ failed = 1;
+ break;
+ }
+ if (create_directory_for_user(user_dir) != 0) {
+ failed = 1;
+ }
+ free(user_dir);
+ }
+ free_values(local_dir);
+ return failed ? INITIALIZE_USER_FAILED : 0;
+}
+
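+/*
+ * Overview of the sequence below: create the user's directories on every
+ * local disk, create the per-job log directories, open the nm-private
+ * credentials file while still running privileged, permanently drop to the
+ * job's user, create the job directories, copy the credentials into the
+ * job directory, and finally exec the supplied job-initialization process.
+ */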
+/**
+ * Function to prepare the job directories for the task JVM.
+ */
+int initialize_job(const char *user, const char *jobid,
+ const char* nmPrivate_credentials_file, char* const* args) {
+ if (jobid == NULL || user == NULL) {
+ fprintf(LOGFILE, "Either jobid is null or the user passed is null.\n");
+ return INVALID_ARGUMENT_NUMBER;
+ }
+
+ // create the user directory on all disks
+ int result = initialize_user(user);
+ if (result != 0) {
+ return result;
+ }
+
+ ////////////// create the log directories for the app on all disks
+ char **log_roots = get_values(TT_LOG_DIR_KEY);
+ if (log_roots == NULL) {
+ return INVALID_CONFIG_FILE;
+ }
+ char **log_root;
+ char *any_one_job_log_dir = NULL;
+ for(log_root=log_roots; *log_root != NULL; ++log_root) {
+ char *job_log_dir = get_job_log_directory(*log_root, jobid);
+ if (job_log_dir == NULL) {
+ // try the next one
+ } else if (create_directory_for_user(job_log_dir) != 0) {
+ free(job_log_dir);
+ return -1;
+ } else if (any_one_job_log_dir == NULL) {
+ any_one_job_log_dir = job_log_dir;
+ } else {
+ free(job_log_dir);
+ }
+ }
+ free_values(log_roots);
+ if (any_one_job_log_dir == NULL) {
+ fprintf(LOGFILE, "Did not create any job-log directories\n");
+ return -1;
+ }
+ free(any_one_job_log_dir);
+ ////////////// End of creating the log directories for the app on all disks
+
+ // open up the credentials file
+ int cred_file = open_file_as_task_tracker(nmPrivate_credentials_file);
+ if (cred_file == -1) {
+ return -1;
+ }
+
+ // give up root privs
+ if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+ return -1;
+ }
+
+ // 750
+ mode_t permissions = S_IRWXU | S_IRGRP | S_IXGRP;
+ char **tt_roots = get_values(TT_SYS_DIR_KEY);
+
+ if (tt_roots == NULL) {
+ return INVALID_CONFIG_FILE;
+ }
+
+ char **tt_root;
+ char *primary_job_dir = NULL;
+ for(tt_root=tt_roots; *tt_root != NULL; ++tt_root) {
+ char *job_dir = get_job_directory(*tt_root, user, jobid);
+ if (job_dir == NULL) {
+ // try the next one
+ } else if (mkdirs(job_dir, permissions) != 0) {
+ free(job_dir);
+ } else if (primary_job_dir == NULL) {
+ primary_job_dir = job_dir;
+ } else {
+ free(job_dir);
+ }
+ }
+ free_values(tt_roots);
+ if (primary_job_dir == NULL) {
+ fprintf(LOGFILE, "Did not create any job directories\n");
+ return -1;
+ }
+
+ char *nmPrivate_credentials_file_copy = strdup(nmPrivate_credentials_file);
+ // TODO: FIXME. The user's copy of creds should go to a path selected by
+  // LocalDirAllocator
+ char *cred_file_name = concatenate("%s/%s", "cred file", 2,
+ primary_job_dir, basename(nmPrivate_credentials_file_copy));
+ if (cred_file_name == NULL) {
+ free(nmPrivate_credentials_file_copy);
+ return -1;
+ }
+ if (copy_file(cred_file, nmPrivate_credentials_file,
+ cred_file_name, S_IRUSR|S_IWUSR) != 0){
+ free(nmPrivate_credentials_file_copy);
+ return -1;
+ }
+
+ free(nmPrivate_credentials_file_copy);
+
+ fclose(stdin);
+ fflush(LOGFILE);
+ if (LOGFILE != stdout) {
+ fclose(stdout);
+ }
+ fclose(stderr);
+ if (chdir(primary_job_dir) != 0) {
+ fprintf(LOGFILE, "Failed to chdir to job dir - %s\n", strerror(errno));
+ return -1;
+ }
+ execvp(args[0], args);
+ fprintf(LOGFILE, "Failure to exec job initialization process - %s\n",
+ strerror(errno));
+ return -1;
+}
+
+/*
+ * Function used to launch a task as the provided user. It does the following :
+ * 1) Creates attempt work dir and log dir to be accessible by the child
+ * 2) Copies the script file from the TT to the work directory
+ * 3) Sets up the environment
+ * 4) Does an execlp on the same in order to replace the current image with
+ * task image.
+ */
+int run_task_as_user(const char *user, const char *job_id,
+ const char *task_id, const char *work_dir,
+ const char *script_name, const char *cred_file) {
+ int exit_code = -1;
+ char *script_file_dest = NULL;
+ char *cred_file_dest = NULL;
+ script_file_dest = get_task_launcher_file(work_dir);
+ if (script_file_dest == NULL) {
+ exit_code = OUT_OF_MEMORY;
+ goto cleanup;
+ }
+ cred_file_dest = get_task_credentials_file(work_dir);
+ if (NULL == cred_file_dest) {
+ exit_code = OUT_OF_MEMORY;
+ goto cleanup;
+ }
+
+ // open launch script
+ int task_file_source = open_file_as_task_tracker(script_name);
+ if (task_file_source == -1) {
+ goto cleanup;
+ }
+
+ // open credentials
+ int cred_file_source = open_file_as_task_tracker(cred_file);
+ if (cred_file_source == -1) {
+ goto cleanup;
+ }
+
+ // give up root privs
+ if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+ exit_code = SETUID_OPER_FAILED;
+ goto cleanup;
+ }
+
+ if (create_attempt_directories(user, job_id, task_id) != 0) {
+ fprintf(LOGFILE, "Could not create attempt dirs");
+ goto cleanup;
+ }
+
+ // 700
+ if (copy_file(task_file_source, script_name, script_file_dest,S_IRWXU) != 0) {
+ goto cleanup;
+ }
+
+ // 600
+ if (copy_file(cred_file_source, cred_file, cred_file_dest,
+ S_IRUSR | S_IWUSR) != 0) {
+ goto cleanup;
+ }
+
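+ // fcloseall() is a GNU extension that flushes and closes every open
+ // stream before the exec below.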
+ fcloseall();
+ umask(0027);
+ if (chdir(work_dir) != 0) {
+ fprintf(LOGFILE, "Can't change directory to %s -%s\n", work_dir,
+ strerror(errno));
+ goto cleanup;
+ }
+ if (execlp(script_file_dest, script_file_dest, NULL) != 0) {
+ fprintf(LOGFILE, "Couldn't execute the task jvm file %s - %s",
+ script_file_dest, strerror(errno));
+ exit_code = UNABLE_TO_EXECUTE_TASK_SCRIPT;
+ goto cleanup;
+ }
+ exit_code = 0;
+
+ cleanup:
+ free(script_file_dest);
+ free(cred_file_dest);
+ return exit_code;
+}
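+
+/*
+ * Example usage (illustrative values only): run_task_as_user replaces the
+ * current image on success, so callers fork first and treat a return as an
+ * error.
+ *
+ *   pid_t child = fork();
+ *   if (child == 0) {
+ *     int rc = run_task_as_user("alice", "job_1", "task_1",
+ *         "/local-1/usercache/alice/appcache/job_1/task_1/work",
+ *         "/nm-private/task.sh", "/nm-private/container_tokens");
+ *     _exit(rc); // only reached on failure
+ *   }
+ */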
+
+/**
+ * Function used to signal a task launched by the user.
+ * The function sends appropriate signal to the process group
+ * specified by the task_pid.
+ */
+int signal_user_task(const char *user, int pid, int sig) {
+ if(pid <= 0) {
+ return INVALID_TASK_PID;
+ }
+
+ if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+ return SETUID_OPER_FAILED;
+ }
+
+ //Don't continue if the process-group is not alive anymore.
+ int has_group = 1;
+ if (kill(-pid,0) < 0) {
+ if (kill(pid, 0) < 0) {
+ if (errno == ESRCH) {
+ return INVALID_TASK_PID;
+ }
+ fprintf(LOGFILE, "Error signalling task %d with %d - %s\n",
+ pid, sig, strerror(errno));
+ return -1;
+ } else {
+ has_group = 0;
+ }
+ }
+
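+ // a negative pid signals the entire process group; fall back to the
+ // single pid when no group exists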
+ if (kill((has_group ? -1 : 1) * pid, sig) < 0) {
+ if(errno != ESRCH) {
+ fprintf(LOGFILE,
+ "Error signalling process group %d with signal %d - %s\n",
+ -pid, sig, strerror(errno));
+ return UNABLE_TO_KILL_TASK;
+ } else {
+ return INVALID_TASK_PID;
+ }
+ }
+ fprintf(LOGFILE, "Killing process %s%d with %d\n",
+ (has_group ? "group " :""), pid, sig);
+ return 0;
+}
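+
+/*
+ * Example usage (illustrative pid): attempt a graceful stop before a hard
+ * kill; signal_user_task signals the whole process group when one exists.
+ *
+ *   if (signal_user_task("alice", task_pid, SIGTERM) == 0) {
+ *     sleep(5);
+ *     signal_user_task("alice", task_pid, SIGKILL);
+ *   }
+ */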
+
+/**
+ * Delete the top-level directory as the task tracker user.
+ */
+static int rmdir_as_tasktracker(const char* path) {
+ uid_t user_uid = geteuid();
+ gid_t user_gid = getegid();
+ int ret = change_effective_user(tt_uid, tt_gid);
+ if (ret == 0) {
+ if (rmdir(path) != 0) {
+ fprintf(LOGFILE, "rmdir of %s failed - %s\n", path, strerror(errno));
+ ret = -1;
+ }
+ }
+ // always change back
+ if (change_effective_user(user_uid, user_gid) != 0) {
+ ret = -1;
+ }
+ return ret;
+}
+
+/**
+ * Recursively delete the given path.
+ * full_path : the path to delete
+ * needs_tt_user: the top level directory must be deleted by the tt user.
+ */
+static int delete_path(const char *full_path,
+ int needs_tt_user) {
+ int exit_code = 0;
+
+ if (full_path == NULL) {
+ fprintf(LOGFILE, "Path is null\n");
+ exit_code = UNABLE_TO_BUILD_PATH; // maybe malloc failed
+ } else {
+ char *(paths[]) = {strdup(full_path), 0};
+ if (paths[0] == NULL) {
+ fprintf(LOGFILE, "Malloc failed in delete_path\n");
+ return -1;
+ }
+ // check to make sure the directory exists
+ if (access(full_path, F_OK) != 0) {
+ if (errno == ENOENT) {
+ free(paths[0]);
+ return 0;
+ }
+ }
+ FTS* tree = fts_open(paths, FTS_PHYSICAL | FTS_XDEV, NULL);
+ FTSENT* entry = NULL;
+ int ret = 0;
+
+ if (tree == NULL) {
+ fprintf(LOGFILE,
+ "Cannot open file traversal structure for the path %s:%s.\n",
+ full_path, strerror(errno));
+ free(paths[0]);
+ return -1;
+ }
+ while (((entry = fts_read(tree)) != NULL) && exit_code == 0) {
+ switch (entry->fts_info) {
+
+ case FTS_DP: // A directory being visited in post-order
+ if (!needs_tt_user ||
+ strcmp(entry->fts_path, full_path) != 0) {
+ if (rmdir(entry->fts_accpath) != 0) {
+ fprintf(LOGFILE, "Couldn't delete directory %s - %s\n",
+ entry->fts_path, strerror(errno));
+ exit_code = -1;
+ }
+ }
+ break;
+
+ case FTS_F: // A regular file
+ case FTS_SL: // A symbolic link
+ case FTS_SLNONE: // A broken symbolic link
+ case FTS_DEFAULT: // Unknown type of file
+ if (unlink(entry->fts_accpath) != 0) {
+ fprintf(LOGFILE, "Couldn't delete file %s - %s\n", entry->fts_path,
+ strerror(errno));
+ exit_code = -1;
+ }
+ break;
+
+ case FTS_DNR: // Unreadable directory
+ fprintf(LOGFILE, "Unreadable directory %s. Skipping..\n",
+ entry->fts_path);
+ break;
+
+ case FTS_D: // A directory in pre-order
+ // if the directory isn't writable by its owner, chmod it so that
+ // its contents can be deleted
+ if ((entry->fts_statp->st_mode & 0200) == 0) {
+ fprintf(LOGFILE, "Non-writable directory %s, chmoding.\n",
+ entry->fts_path);
+ if (chmod(entry->fts_accpath, 0700) != 0) {
+ fprintf(LOGFILE, "Error chmoding %s - %s, continuing\n",
+ entry->fts_path, strerror(errno));
+ }
+ }
+ break;
+
+ case FTS_NS: // A file with no stat(2) information
+ // usually a root directory that doesn't exist
+ fprintf(LOGFILE, "Directory not found %s\n", entry->fts_path);
+ break;
+
+ case FTS_DC: // A directory that causes a cycle
+ case FTS_DOT: // A dot directory
+ case FTS_NSOK: // No stat information requested
+ break;
+
+ case FTS_ERR: // Error return
+ fprintf(LOGFILE, "Error traversing directory %s - %s\n",
+ entry->fts_path, strerror(entry->fts_errno));
+ exit_code = -1;
+ break;
+ default:
+ exit_code = -1;
+ break;
+ }
+ }
+ ret = fts_close(tree);
+ if (exit_code == 0 && ret != 0) {
+ fprintf(LOGFILE, "Error in fts_close while deleting %s\n", full_path);
+ exit_code = -1;
+ }
+ if (needs_tt_user) {
+ // Do a final rmdir of the top level as the task tracker user. That
+ // handles the case where the top level directory lives in a directory
+ // that is owned by the task tracker.
+ if (rmdir_as_tasktracker(full_path) != 0) {
+ exit_code = -1;
+ }
+ }
+ free(paths[0]);
+ }
+ return exit_code;
+}
+
+/**
+ * Delete the given directory as the user from each of the tt_root directories
+ * user: the user doing the delete
+ * subdir: the subdir to delete (if baseDirs is empty, this is treated as
+ an absolute path)
+ * baseDirs: (optional) the baseDirs where the subdir is located
+ */
+int delete_as_user(const char *user,
+ const char *subdir,
+ char* const* baseDirs) {
+ int ret = 0;
+
+ char** ptr;
+
+ // TODO: should this switch to the user before deleting?
+ if (baseDirs == NULL || *baseDirs == NULL) {
+ return delete_path(subdir, strlen(subdir) == 0);
+ }
+ // do the delete
+ for(ptr = (char**)baseDirs; *ptr != NULL; ++ptr) {
+ char* full_path = concatenate("%s/%s", "user subdir", 2,
+ *ptr, subdir);
+ if (full_path == NULL) {
+ return -1;
+ }
+ int this_ret = delete_path(full_path, strlen(subdir) == 0);
+ free(full_path);
+ // delete as much as we can, but remember the error
+ if (this_ret != 0) {
+ ret = this_ret;
+ }
+ }
+ return ret;
+}
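+
+/*
+ * Example usage (illustrative paths): delete a subdirectory from every
+ * configured local root, or delete one absolute path by passing NULL
+ * baseDirs.
+ *
+ *   char * const local_dirs[] = {"/local-1", "/local-2", NULL};
+ *   delete_as_user("alice", "usercache/alice/appcache/app_1", local_dirs);
+ *   delete_as_user("alice", "/tmp/scratch/app_1", NULL);
+ */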
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/container-executor.h b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/container-executor.h
new file mode 100644
index 0000000..38472cb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/container-executor.h
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <pwd.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+//command definitions
+enum command {
+ INITIALIZE_JOB = 0,
+ LAUNCH_TASK_JVM = 1,
+ SIGNAL_TASK = 2,
+ DELETE_AS_USER = 3,
+};
+
+enum errorcodes {
+ INVALID_ARGUMENT_NUMBER = 1,
+ INVALID_USER_NAME, //2
+ INVALID_COMMAND_PROVIDED, //3
+ SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS, //4
+ INVALID_TT_ROOT, //5
+ SETUID_OPER_FAILED, //6
+ UNABLE_TO_EXECUTE_TASK_SCRIPT, //7
+ UNABLE_TO_KILL_TASK, //8
+ INVALID_TASK_PID, //9
+ ERROR_RESOLVING_FILE_PATH, //10
+ RELATIVE_PATH_COMPONENTS_IN_FILE_PATH, //11
+ UNABLE_TO_STAT_FILE, //12
+ FILE_NOT_OWNED_BY_TASKTRACKER, //13
+ PREPARE_ATTEMPT_DIRECTORIES_FAILED, //14
+ INITIALIZE_JOB_FAILED, //15
+ PREPARE_TASK_LOGS_FAILED, //16
+ INVALID_TT_LOG_DIR, //17
+ OUT_OF_MEMORY, //18
+ INITIALIZE_DISTCACHEFILE_FAILED, //19
+ INITIALIZE_USER_FAILED, //20
+ UNABLE_TO_BUILD_PATH, //21
+ INVALID_TASKCONTROLLER_PERMISSIONS, //22
+ PREPARE_JOB_LOGS_FAILED, //23
+ INVALID_CONFIG_FILE, // 24
+};
+
+#define TT_GROUP_KEY "mapreduce.tasktracker.group"
+#define USER_DIR_PATTERN "%s/usercache/%s"
+#define TT_JOB_DIR_PATTERN USER_DIR_PATTERN "/appcache/%s"
+#define ATTEMPT_DIR_PATTERN TT_JOB_DIR_PATTERN "/%s"
+#define TASK_SCRIPT "task.sh"
+#define TT_LOCAL_TASK_DIR_PATTERN TT_JOB_DIR_PATTERN "/%s"
+#define TT_SYS_DIR_KEY "mapreduce.cluster.local.dir"
+#define TT_LOG_DIR_KEY "hadoop.log.dir"
+#define CREDENTIALS_FILENAME "container_tokens"
+#define MIN_USERID_KEY "min.user.id"
+#define BANNED_USERS_KEY "banned.users"
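+
+/*
+ * Example: with a local root of "/local-1", user "alice" and app "app_1",
+ * USER_DIR_PATTERN expands to "/local-1/usercache/alice" and
+ * TT_JOB_DIR_PATTERN to "/local-1/usercache/alice/appcache/app_1".
+ */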
+
+extern struct passwd *user_detail;
+
+// the log file for error messages
+extern FILE *LOGFILE;
+
+// get the executable's filename
+char* get_executable();
+
+int check_taskcontroller_permissions(char *executable_file);
+
+/**
+ * Delete the given log directory as the user.
+ */
+
+// initialize the job directory
+int initialize_job(const char *user, const char *jobid,
+ const char *credentials, char* const* args);
+
+// run the task as the user
+int run_task_as_user(const char * user, const char *jobid, const char *taskid,
+ const char *work_dir, const char *script_name,
+ const char *cred_file);
+
+// send a signal as the user
+int signal_user_task(const char *user, int pid, int sig);
+
+// delete a directory (or file) recursively as the user. The directory
+// could optionally be relative to the baseDir set of directories (if the same
+// directory appears on multiple disk volumes, the disk volumes should be passed
+// as the baseDirs). If baseDirs is not specified, then dir_to_be_deleted is
+// assumed to be an absolute path.
+int delete_as_user(const char *user,
+ const char *dir_to_be_deleted,
+ char* const* baseDirs);
+
+// set the task tracker's uid and gid
+void set_tasktracker_uid(uid_t user, gid_t group);
+
+/**
+ * Is the user a real user account?
+ * Checks:
+ * 1. Not root
+ * 2. UID is above the minimum configured.
+ * 3. Not in banned user list
+ * Returns NULL on failure
+ */
+struct passwd* check_user(const char *user);
+
+// set the user
+int set_user(const char *user);
+
+// methods to get the directories
+
+char *get_user_directory(const char *tt_root, const char *user);
+
+char *get_job_directory(const char * tt_root, const char *user,
+ const char *jobid);
+
+char *get_attempt_work_directory(const char *tt_root, const char *user,
+ const char *job_dir, const char *attempt_id);
+
+char *get_task_launcher_file(const char* work_dir);
+
+char *get_task_credentials_file(const char* work_dir);
+
+/**
+ * Get the job log directory under log_root
+ */
+char* get_job_log_directory(const char* log_root, const char* jobid);
+
+char *get_task_log_dir(const char *log_dir, const char *job_id,
+ const char *attempt_id);
+
+/**
+ * Ensure that the given path and all of the parent directories are created
+ * with the desired permissions.
+ */
+int mkdirs(const char* path, mode_t perm);
+
+/**
+ * Function to initialize the user directories of a user.
+ */
+int initialize_user(const char *user);
+
+/**
+ * Create a top level directory for the user.
+ * It assumes that the parent directory is *not* writable by the user.
+ * It creates directories with 02700 permissions owned by the user
+ * and with the group set to the task tracker group.
+ * return non-0 on failure
+ */
+int create_directory_for_user(const char* path);
+
+int change_user(uid_t user, gid_t group);
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/main.c b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/main.c
new file mode 100644
index 0000000..a3d5631
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/impl/main.c
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "configuration.h"
+#include "container-executor.h"
+
+#include <errno.h>
+#include <grp.h>
+#include <limits.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+
+#define _STRINGIFY(X) #X
+#define STRINGIFY(X) _STRINGIFY(X)
+#define CONF_FILENAME "taskcontroller.cfg"
+
+void display_usage(FILE *stream) {
+ fprintf(stream,
+ "Usage: container-executor user command command-args\n");
+ fprintf(stream, "Commands:\n");
+ fprintf(stream, " initialize job: %2d jobid tokens cmd args\n",
+ INITIALIZE_JOB);
+ fprintf(stream, " launch task: %2d jobid taskid workdir task-script jobTokens\n",
+ LAUNCH_TASK_JVM);
+ fprintf(stream, " signal task: %2d task-pid signal\n",
+ SIGNAL_TASK);
+ fprintf(stream, " delete as user: %2d relative-path\n",
+ DELETE_AS_USER);
+}
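+
+/*
+ * Example invocations (user, ids and paths are illustrative):
+ *   container-executor alice 0 job_1 /nm-private/container_tokens /bin/true
+ *   container-executor alice 1 job_1 task_1 /work/dir /nm-private/task.sh \
+ *       /nm-private/container_tokens
+ *   container-executor alice 2 12345 9
+ *   container-executor alice 3 /tmp/dir-to-delete
+ */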
+
+int main(int argc, char **argv) {
+ //Minimum number of arguments required to run the container-executor
+ if (argc < 4) {
+ display_usage(stdout);
+ return INVALID_ARGUMENT_NUMBER;
+ }
+
+ LOGFILE = stdout;
+ int command;
+ const char * job_id = NULL;
+ const char * task_id = NULL;
+ const char * cred_file = NULL;
+ const char * script_file = NULL;
+ const char * current_dir = NULL;
+
+ int exit_code = 0;
+
+ char * dir_to_be_deleted = NULL;
+
+ char *executable_file = get_executable();
+
+#ifndef HADOOP_CONF_DIR
+ #error HADOOP_CONF_DIR must be defined
+#endif
+
+ char *orig_conf_file = STRINGIFY(HADOOP_CONF_DIR) "/" CONF_FILENAME;
+ char *conf_file = realpath(orig_conf_file, NULL);
+
+ if (conf_file == NULL) {
+ fprintf(LOGFILE, "Configuration file %s not found.\n", orig_conf_file);
+ return INVALID_CONFIG_FILE;
+ }
+ if (check_configuration_permissions(conf_file) != 0) {
+ return INVALID_CONFIG_FILE;
+ }
+ read_config(conf_file);
+ free(conf_file);
+
+ // look up the task tracker group in the config file
+ char *tt_group = get_value(TT_GROUP_KEY);
+ if (tt_group == NULL) {
+ fprintf(LOGFILE, "Can't get configured value for %s.\n", TT_GROUP_KEY);
+ exit(INVALID_CONFIG_FILE);
+ }
+ struct group *group_info = getgrnam(tt_group);
+ if (group_info == NULL) {
+ fprintf(LOGFILE, "Can't get group information for %s - %s.\n", tt_group,
+ strerror(errno));
+ exit(INVALID_CONFIG_FILE);
+ }
+ set_tasktracker_uid(getuid(), group_info->gr_gid);
+ // if we are running from a setuid executable, make the real uid root
+ setuid(0);
+ // set the real and effective group id to the task tracker group
+ setgid(group_info->gr_gid);
+
+ if (check_taskcontroller_permissions(executable_file) != 0) {
+ fprintf(LOGFILE, "Invalid permissions on container-executor binary.\n");
+ return INVALID_TASKCONTROLLER_PERMISSIONS;
+ }
+
+ // validate the user name argument
+ if (argv[optind] == NULL) {
+ fprintf(LOGFILE, "Invalid user name\n");
+ return INVALID_USER_NAME;
+ }
+ int ret = set_user(argv[optind]);
+ if (ret != 0) {
+ return ret;
+ }
+
+ optind = optind + 1;
+ command = atoi(argv[optind++]);
+
+ fprintf(LOGFILE, "main : command provided %d\n",command);
+ fprintf(LOGFILE, "main : user is %s\n", user_detail->pw_name);
+
+ switch (command) {
+ case INITIALIZE_JOB:
+ if (argc < 6) {
+ fprintf(LOGFILE, "Too few arguments (%d vs 6) for initialize job\n",
+ argc);
+ return INVALID_ARGUMENT_NUMBER;
+ }
+ job_id = argv[optind++];
+ cred_file = argv[optind++];
+ exit_code = initialize_job(user_detail->pw_name, job_id, cred_file,
+ argv + optind);
+ break;
+ case LAUNCH_TASK_JVM:
+ if (argc < 8) {
+ fprintf(LOGFILE, "Too few arguments (%d vs 8) for launch task\n",
+ argc);
+ return INVALID_ARGUMENT_NUMBER;
+ }
+ job_id = argv[optind++];
+ task_id = argv[optind++];
+ current_dir = argv[optind++];
+ script_file = argv[optind++];
+ cred_file = argv[optind++];
+ exit_code = run_task_as_user(user_detail->pw_name, job_id, task_id,
+ current_dir, script_file, cred_file);
+ break;
+ case SIGNAL_TASK:
+ if (argc < 5) {
+ fprintf(LOGFILE, "Too few arguments (%d vs 5) for signal task\n",
+ argc);
+ return INVALID_ARGUMENT_NUMBER;
+ } else {
+ char* end_ptr = NULL;
+ char* option = argv[optind++];
+ int task_pid = strtol(option, &end_ptr, 10);
+ if (option == end_ptr || *end_ptr != '\0') {
+ fprintf(LOGFILE, "Illegal argument for task pid %s\n", option);
+ return INVALID_ARGUMENT_NUMBER;
+ }
+ option = argv[optind++];
+ int signal = strtol(option, &end_ptr, 10);
+ if (option == end_ptr || *end_ptr != '\0') {
+ fprintf(LOGFILE, "Illegal argument for signal %s\n", option);
+ return INVALID_ARGUMENT_NUMBER;
+ }
+ exit_code = signal_user_task(user_detail->pw_name, task_pid, signal);
+ }
+ break;
+ case DELETE_AS_USER:
+ dir_to_be_deleted = argv[optind++];
+ exit_code = delete_as_user(user_detail->pw_name, dir_to_be_deleted,
+ argv + optind);
+ break;
+ default:
+ exit_code = INVALID_COMMAND_PROVIDED;
+ }
+ fclose(LOGFILE);
+ return exit_code;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/test/test-task-controller.c b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/test/test-task-controller.c
new file mode 100644
index 0000000..bfbd0f1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/test/test-task-controller.c
@@ -0,0 +1,763 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "configuration.h"
+#include "task-controller.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+#define TEST_ROOT "/tmp/test-task-controller"
+#define DONT_TOUCH_FILE "dont-touch-me"
+
+static char* username = NULL;
+
+/**
+ * Run the command using the effective user id.
+ * It can't use system, since bash seems to copy the real user id into the
+ * effective id.
+ */
+void run(const char *cmd) {
+ fflush(stdout);
+ fflush(stderr);
+ pid_t child = fork();
+ if (child == -1) {
+ printf("FAIL: failed to fork - %s\n", strerror(errno));
+ } else if (child == 0) {
+ char *cmd_copy = strdup(cmd);
+ char *ptr;
+ int words = 1;
+ for(ptr = strchr(cmd_copy, ' '); ptr; ptr = strchr(ptr+1, ' ')) {
+ words += 1;
+ }
+ char **argv = malloc(sizeof(char *) * (words + 1));
+ ptr = strtok(cmd_copy, " ");
+ int i = 0;
+ argv[i++] = ptr;
+ while (ptr != NULL) {
+ ptr = strtok(NULL, " ");
+ argv[i++] = ptr;
+ }
+ if (execvp(argv[0], argv) != 0) {
+ printf("FAIL: exec failed in child %s - %s\n", cmd, strerror(errno));
+ exit(42);
+ }
+ } else {
+ int status = 0;
+ if (waitpid(child, &status, 0) <= 0) {
+ printf("FAIL: failed waiting for child process %s pid %d - %s\n",
+ cmd, child, strerror(errno));
+ exit(1);
+ }
+ if (!WIFEXITED(status)) {
+ printf("FAIL: process %s pid %d did not exit\n", cmd, child);
+ exit(1);
+ }
+ if (WEXITSTATUS(status) != 0) {
+ printf("FAIL: process %s pid %d exited with error status %d\n", cmd,
+ child, WEXITSTATUS(status));
+ exit(1);
+ }
+ }
+}
+
+int write_config_file(char *file_name) {
+ FILE *file;
+ file = fopen(file_name, "w");
+ if (file == NULL) {
+ printf("Failed to open %s.\n", file_name);
+ return EXIT_FAILURE;
+ }
+ fprintf(file, "mapred.local.dir=" TEST_ROOT "/local-1");
+ int i;
+ for(i=2; i < 5; ++i) {
+ fprintf(file, "," TEST_ROOT "/local-%d", i);
+ }
+ fprintf(file, "\n");
+ fprintf(file, "hadoop.log.dir=" TEST_ROOT "/logs\n");
+ fclose(file);
+ return 0;
+}
+
+void create_tt_roots() {
+ char** tt_roots = get_values("mapred.local.dir");
+ char** tt_root;
+ for(tt_root=tt_roots; *tt_root != NULL; ++tt_root) {
+ if (mkdir(*tt_root, 0755) != 0) {
+ printf("FAIL: Can't create directory %s - %s\n", *tt_root,
+ strerror(errno));
+ exit(1);
+ }
+ char buffer[100000];
+ sprintf(buffer, "%s/taskTracker", *tt_root);
+ if (mkdir(buffer, 0755) != 0) {
+ printf("FAIL: Can't create directory %s - %s\n", buffer,
+ strerror(errno));
+ exit(1);
+ }
+ }
+ free_values(tt_roots);
+}
+
+void test_get_user_directory() {
+ char *user_dir = get_user_directory("/tmp", "user");
+ char *expected = "/tmp/taskTracker/user";
+ if (strcmp(user_dir, expected) != 0) {
+ printf("test_get_user_directory expected %s got %s\n", user_dir, expected);
+ exit(1);
+ }
+ free(user_dir);
+}
+
+void test_get_job_directory() {
+ char *expected = "/tmp/taskTracker/user/appcache/job_200906101234_0001";
+ char *job_dir = (char *) get_job_directory("/tmp", "user",
+ "job_200906101234_0001");
+ if (strcmp(job_dir, expected) != 0) {
+ printf("FAIL: test_get_job_directory expected %s got %s\n", expected,
+ job_dir);
+ exit(1);
+ }
+ free(job_dir);
+}
+
+void test_get_attempt_directory() {
+ char *attempt_dir = get_attempt_work_directory("/tmp", "owen", "job_1",
+ "attempt_1");
+ char *expected = "/tmp/taskTracker/owen/appcache/job_1/attempt_1/work";
+ if (strcmp(attempt_dir, expected) != 0) {
+ printf("Fail get_attempt_work_directory got %s expected %s\n",
+ attempt_dir, expected);
+ }
+ free(attempt_dir);
+}
+
+void test_get_task_launcher_file() {
+ char *expected_file = ("/tmp/taskTracker/user/appcache/job_200906101234_0001"
+ "/taskjvm.sh");
+ char *job_dir = get_job_directory("/tmp", "user",
+ "job_200906101234_0001");
+ char *task_file = get_task_launcher_file(job_dir);
+ if (strcmp(task_file, expected_file) != 0) {
+ printf("failure to match expected task file %s vs %s\n", task_file,
+ expected_file);
+ exit(1);
+ }
+ free(job_dir);
+ free(task_file);
+}
+
+void test_get_job_log_dir() {
+ char *expected = TEST_ROOT "/logs/userlogs/job_200906101234_0001";
+ char *logdir = get_job_log_directory("job_200906101234_0001");
+ if (strcmp(logdir, expected) != 0) {
+ printf("Fail get_job_log_dir got %s expected %s\n", logdir, expected);
+ exit(1);
+ }
+ free(logdir);
+}
+
+void test_get_task_log_dir() {
+ char *logdir = get_job_log_directory("job_5/task_4");
+ char *expected = TEST_ROOT "/logs/userlogs/job_5/task_4";
+ if (strcmp(logdir, expected) != 0) {
+ printf("FAIL: get_task_log_dir expected %s got %s\n", logdir, expected);
+ }
+ free(logdir);
+}
+
+void test_check_user() {
+ printf("\nTesting test_check_user\n");
+ struct passwd *user = check_user(username);
+ if (user == NULL) {
+ printf("FAIL: failed check for user %s\n", username);
+ exit(1);
+ }
+ free(user);
+ if (check_user("lp") != NULL) {
+ printf("FAIL: failed check for system user lp\n");
+ exit(1);
+ }
+ if (check_user("root") != NULL) {
+ printf("FAIL: failed check for system user root\n");
+ exit(1);
+ }
+ if (check_user("mapred") != NULL) {
+ printf("FAIL: failed check for hadoop user mapred\n");
+ exit(1);
+ }
+}
+
+void test_check_configuration_permissions() {
+ printf("\nTesting check_configuration_permissions\n");
+ if (check_configuration_permissions("/etc/passwd") != 0) {
+ printf("FAIL: failed permission check on /etc/passwd\n");
+ exit(1);
+ }
+ if (check_configuration_permissions(TEST_ROOT) == 0) {
+ printf("FAIL: failed permission check on %s\n", TEST_ROOT);
+ exit(1);
+ }
+}
+
+void test_delete_task() {
+ if (initialize_user(username)) {
+ printf("FAIL: failed to initialized user %s\n", username);
+ exit(1);
+ }
+ char* job_dir = get_job_directory(TEST_ROOT "/local-2", username, "job_1");
+ char* dont_touch = get_job_directory(TEST_ROOT "/local-2", username,
+ DONT_TOUCH_FILE);
+ char* task_dir = get_attempt_work_directory(TEST_ROOT "/local-2",
+ username, "job_1", "task_1");
+ char buffer[100000];
+ sprintf(buffer, "mkdir -p %s/who/let/the/dogs/out/who/who", task_dir);
+ run(buffer);
+ sprintf(buffer, "touch %s", dont_touch);
+ run(buffer);
+
+ // soft link to the canary file from the task directory
+ sprintf(buffer, "ln -s %s %s/who/softlink", dont_touch, task_dir);
+ run(buffer);
+ // hard link to the canary file from the task directory
+ sprintf(buffer, "ln %s %s/who/hardlink", dont_touch, task_dir);
+ run(buffer);
+ // create a dot file in the task directory
+ sprintf(buffer, "touch %s/who/let/.dotfile", task_dir);
+ run(buffer);
+ // create a no permission file
+ sprintf(buffer, "touch %s/who/let/protect", task_dir);
+ run(buffer);
+ sprintf(buffer, "chmod 000 %s/who/let/protect", task_dir);
+ run(buffer);
+ // create a no permission directory
+ sprintf(buffer, "chmod 000 %s/who/let", task_dir);
+ run(buffer);
+
+ // delete the task directory (passed as an absolute path, since no
+ // baseDirs are given)
+ sprintf(buffer, "%s/task_1", job_dir);
+ int ret = delete_as_user(username, buffer, NULL);
+ if (ret != 0) {
+ printf("FAIL: return code from delete_as_user is %d\n", ret);
+ exit(1);
+ }
+
+ // check to make sure the task directory is gone
+ if (access(task_dir, R_OK) == 0) {
+ printf("FAIL: failed to delete the directory - %s\n", task_dir);
+ exit(1);
+ }
+ // check to make sure the job directory is not gone
+ if (access(job_dir, R_OK) != 0) {
+ printf("FAIL: accidently deleted the directory - %s\n", job_dir);
+ exit(1);
+ }
+ // but that the canary is not gone
+ if (access(dont_touch, R_OK) != 0) {
+ printf("FAIL: accidently deleted file %s\n", dont_touch);
+ exit(1);
+ }
+ sprintf(buffer, "chmod -R 700 %s", job_dir);
+ run(buffer);
+ sprintf(buffer, "rm -fr %s", job_dir);
+ run(buffer);
+ free(job_dir);
+ free(task_dir);
+ free(dont_touch);
+}
+
+void test_delete_job() {
+ char* job_dir = get_job_directory(TEST_ROOT "/local-2", username, "job_2");
+ char* dont_touch = get_job_directory(TEST_ROOT "/local-2", username,
+ DONT_TOUCH_FILE);
+ char* task_dir = get_attempt_work_directory(TEST_ROOT "/local-2",
+ username, "job_2", "task_1");
+ char buffer[100000];
+ sprintf(buffer, "mkdir -p %s/who/let/the/dogs/out/who/who", task_dir);
+ run(buffer);
+ sprintf(buffer, "touch %s", dont_touch);
+ run(buffer);
+
+ // soft link to the canary file from the task directory
+ sprintf(buffer, "ln -s %s %s/who/softlink", dont_touch, task_dir);
+ run(buffer);
+ // hard link to the canary file from the task directory
+ sprintf(buffer, "ln %s %s/who/hardlink", dont_touch, task_dir);
+ run(buffer);
+ // create a dot file in the task directory
+ sprintf(buffer, "touch %s/who/let/.dotfile", task_dir);
+ run(buffer);
+ // create a no permission file
+ sprintf(buffer, "touch %s/who/let/protect", task_dir);
+ run(buffer);
+ sprintf(buffer, "chmod 000 %s/who/let/protect", task_dir);
+ run(buffer);
+ // create a no permission directory
+ sprintf(buffer, "chmod 000 %s/who/let", task_dir);
+ run(buffer);
+
+ // delete the job directory (absolute path, no baseDirs)
+ int ret = delete_as_user(username, job_dir, NULL);
+ if (ret != 0) {
+ printf("FAIL: return code from delete_as_user is %d\n", ret);
+ exit(1);
+ }
+
+ // check to make sure the task directory is gone
+ if (access(task_dir, R_OK) == 0) {
+ printf("FAIL: failed to delete the directory - %s\n", task_dir);
+ exit(1);
+ }
+ // check to make sure the job directory is gone
+ if (access(job_dir, R_OK) == 0) {
+ printf("FAIL: didn't delete the directory - %s\n", job_dir);
+ exit(1);
+ }
+ // but that the canary is not gone
+ if (access(dont_touch, R_OK) != 0) {
+ printf("FAIL: accidently deleted file %s\n", dont_touch);
+ exit(1);
+ }
+ free(job_dir);
+ free(task_dir);
+ free(dont_touch);
+}
+
+
+void test_delete_user() {
+ printf("\nTesting delete_user\n");
+ char* job_dir = get_job_directory(TEST_ROOT "/local-1", username, "job_3");
+ if (mkdirs(job_dir, 0700) != 0) {
+ exit(1);
+ }
+ char buffer[100000];
+ sprintf(buffer, "%s/local-1/taskTracker/%s", TEST_ROOT, username);
+ if (access(buffer, R_OK) != 0) {
+ printf("FAIL: directory missing before test\n");
+ exit(1);
+ }
+ if (delete_as_user(username, "") != 0) {
+ exit(1);
+ }
+ if (access(buffer, R_OK) == 0) {
+ printf("FAIL: directory not deleted\n");
+ exit(1);
+ }
+ if (access(TEST_ROOT "/local-1", R_OK) != 0) {
+ printf("FAIL: local-1 directory does not exist\n");
+ exit(1);
+ }
+ free(job_dir);
+}
+
+void test_delete_log_directory() {
+ printf("\nTesting delete_log_directory\n");
+ char *job_log_dir = get_job_log_directory("job_1");
+ if (job_log_dir == NULL) {
+ exit(1);
+ }
+ if (create_directory_for_user(job_log_dir) != 0) {
+ exit(1);
+ }
+ free(job_log_dir);
+ char *task_log_dir = get_job_log_directory("job_1/task_2");
+ if (task_log_dir == NULL) {
+ exit(1);
+ }
+ if (mkdirs(task_log_dir, 0700) != 0) {
+ exit(1);
+ }
+ if (access(TEST_ROOT "/logs/userlogs/job_1/task_2", R_OK) != 0) {
+ printf("FAIL: can't access task directory - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (delete_log_directory("job_1/task_2") != 0) {
+ printf("FAIL: can't delete task directory\n");
+ exit(1);
+ }
+ if (access(TEST_ROOT "/logs/userlogs/job_1/task_2", R_OK) == 0) {
+ printf("FAIL: task directory not deleted\n");
+ exit(1);
+ }
+ if (access(TEST_ROOT "/logs/userlogs/job_1", R_OK) != 0) {
+ printf("FAIL: job directory not deleted - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (delete_log_directory("job_1") != 0) {
+ printf("FAIL: can't delete task directory\n");
+ exit(1);
+ }
+ if (access(TEST_ROOT "/logs/userlogs/job_1", R_OK) == 0) {
+ printf("FAIL: job directory not deleted\n");
+ exit(1);
+ }
+ free(task_log_dir);
+}
+
+void run_test_in_child(const char* test_name, void (*func)()) {
+ printf("\nRunning test %s in child process\n", test_name);
+ fflush(stdout);
+ fflush(stderr);
+ pid_t child = fork();
+ if (child == -1) {
+ printf("FAIL: fork failed\n");
+ exit(1);
+ } else if (child == 0) {
+ func();
+ exit(0);
+ } else {
+ int status = 0;
+ if (waitpid(child, &status, 0) == -1) {
+ printf("FAIL: waitpid %d failed - %s\n", child, strerror(errno));
+ exit(1);
+ }
+ if (!WIFEXITED(status)) {
+ printf("FAIL: child %d didn't exit - %d\n", child, status);
+ exit(1);
+ }
+ if (WEXITSTATUS(status) != 0) {
+ printf("FAIL: child %d exited with bad status %d\n",
+ child, WEXITSTATUS(status));
+ exit(1);
+ }
+ }
+}
+
+void test_signal_task() {
+ printf("\nTesting signal_task\n");
+ fflush(stdout);
+ fflush(stderr);
+ pid_t child = fork();
+ if (child == -1) {
+ printf("FAIL: fork failed\n");
+ exit(1);
+ } else if (child == 0) {
+ if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+ exit(1);
+ }
+ sleep(3600);
+ exit(0);
+ } else {
+ printf("Child task launched as %d\n", child);
+ if (signal_user_task(username, child, SIGQUIT) != 0) {
+ exit(1);
+ }
+ int status = 0;
+ if (waitpid(child, &status, 0) == -1) {
+ printf("FAIL: waitpid failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (!WIFSIGNALED(status)) {
+ printf("FAIL: child wasn't signalled - %d\n", status);
+ exit(1);
+ }
+ if (WTERMSIG(status) != SIGQUIT) {
+ printf("FAIL: child was killed with %d instead of %d\n",
+ WTERMSIG(status), SIGQUIT);
+ exit(1);
+ }
+ }
+}
+
+void test_signal_task_group() {
+ printf("\nTesting group signal_task\n");
+ fflush(stdout);
+ fflush(stderr);
+ pid_t child = fork();
+ if (child == -1) {
+ printf("FAIL: fork failed\n");
+ exit(1);
+ } else if (child == 0) {
+ setpgrp();
+ if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+ exit(1);
+ }
+ sleep(3600);
+ exit(0);
+ }
+ printf("Child task launched as %d\n", child);
+ if (signal_user_task(username, child, SIGKILL) != 0) {
+ exit(1);
+ }
+ int status = 0;
+ if (waitpid(child, &status, 0) == -1) {
+ printf("FAIL: waitpid failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (!WIFSIGNALED(status)) {
+ printf("FAIL: child wasn't signalled - %d\n", status);
+ exit(1);
+ }
+ if (WTERMSIG(status) != SIGKILL) {
+ printf("FAIL: child was killed with %d instead of %d\n",
+ WTERMSIG(status), SIGKILL);
+ exit(1);
+ }
+}
+
+void test_init_job() {
+ printf("\nTesting init job\n");
+ if (seteuid(0) != 0) {
+ printf("FAIL: seteuid to root failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ FILE* creds = fopen(TEST_ROOT "/creds.txt", "w");
+ if (creds == NULL) {
+ printf("FAIL: failed to create credentials file - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (fprintf(creds, "secret key\n") < 0) {
+ printf("FAIL: fprintf failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (fclose(creds) != 0) {
+ printf("FAIL: fclose failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ FILE* job_xml = fopen(TEST_ROOT "/job.xml", "w");
+ if (job_xml == NULL) {
+ printf("FAIL: failed to create job file - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (fprintf(job_xml, "<jobconf/>\n") < 0) {
+ printf("FAIL: fprintf failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (fclose(job_xml) != 0) {
+ printf("FAIL: fclose failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (seteuid(user_detail->pw_uid) != 0) {
+ printf("FAIL: failed to seteuid back to user - %s\n", strerror(errno));
+ exit(1);
+ }
+ fflush(stdout);
+ fflush(stderr);
+ pid_t child = fork();
+ if (child == -1) {
+ printf("FAIL: failed to fork process for init_job - %s\n",
+ strerror(errno));
+ exit(1);
+ } else if (child == 0) {
+ char *final_pgm[] = {"touch", "my-touch-file", 0};
+ if (initialize_job(username, "job_4", TEST_ROOT "/creds.txt",
+ TEST_ROOT "/job.xml", final_pgm) != 0) {
+ printf("FAIL: failed in child\n");
+ exit(42);
+ }
+ // should never return
+ exit(1);
+ }
+ int status = 0;
+ if (waitpid(child, &status, 0) <= 0) {
+ printf("FAIL: failed waiting for process %d - %s\n", child,
+ strerror(errno));
+ exit(1);
+ }
+ if (access(TEST_ROOT "/logs/userlogs/job_4", R_OK) != 0) {
+ printf("FAIL: failed to create job log directory\n");
+ exit(1);
+ }
+ char* job_dir = get_job_directory(TEST_ROOT "/local-1", username, "job_4");
+ if (access(job_dir, R_OK) != 0) {
+ printf("FAIL: failed to create job directory %s\n", job_dir);
+ exit(1);
+ }
+ char buffer[100000];
+ sprintf(buffer, "%s/jobToken", job_dir);
+ if (access(buffer, R_OK) != 0) {
+ printf("FAIL: failed to create credentials %s\n", buffer);
+ exit(1);
+ }
+ sprintf(buffer, "%s/my-touch-file", job_dir);
+ if (access(buffer, R_OK) != 0) {
+ printf("FAIL: failed to create touch file %s\n", buffer);
+ exit(1);
+ }
+ free(job_dir);
+ job_dir = get_job_log_directory("job_4");
+ if (access(job_dir, R_OK) != 0) {
+ printf("FAIL: failed to create job log directory %s\n", job_dir);
+ exit(1);
+ }
+ free(job_dir);
+}
+
+void test_run_task() {
+ printf("\nTesting run task\n");
+ if (seteuid(0) != 0) {
+ printf("FAIL: seteuid to root failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ const char* script_name = TEST_ROOT "/task-script";
+ FILE* script = fopen(script_name, "w");
+ if (script == NULL) {
+ printf("FAIL: failed to create script file - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (seteuid(user_detail->pw_uid) != 0) {
+ printf("FAIL: failed to seteuid back to user - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (fprintf(script, "#!/bin/bash\n"
+ "touch foobar\n"
+ "exit 0") < 0) {
+ printf("FAIL: fprintf failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ if (fclose(script) != 0) {
+ printf("FAIL: fclose failed - %s\n", strerror(errno));
+ exit(1);
+ }
+ fflush(stdout);
+ fflush(stderr);
+ char* task_dir = get_attempt_work_directory(TEST_ROOT "/local-1",
+ username, "job_4", "task_1");
+ pid_t child = fork();
+ if (child == -1) {
+ printf("FAIL: failed to fork process for init_job - %s\n",
+ strerror(errno));
+ exit(1);
+ } else if (child == 0) {
+ if (run_task_as_user(username, "job_4", "task_1",
+ task_dir, script_name) != 0) {
+ printf("FAIL: failed in child\n");
+ exit(42);
+ }
+ // should never return
+ exit(1);
+ }
+ int status = 0;
+ if (waitpid(child, &status, 0) <= 0) {
+ printf("FAIL: failed waiting for process %d - %s\n", child,
+ strerror(errno));
+ exit(1);
+ }
+ if (access(TEST_ROOT "/logs/userlogs/job_4/task_1", R_OK) != 0) {
+ printf("FAIL: failed to create task log directory\n");
+ exit(1);
+ }
+ if (access(task_dir, R_OK) != 0) {
+ printf("FAIL: failed to create task directory %s\n", task_dir);
+ exit(1);
+ }
+ char buffer[100000];
+ sprintf(buffer, "%s/foobar", task_dir);
+ if (access(buffer, R_OK) != 0) {
+ printf("FAIL: failed to create touch file %s\n", buffer);
+ exit(1);
+ }
+ free(task_dir);
+ task_dir = get_job_log_directory("job_4/task_1");
+ if (access(task_dir, R_OK) != 0) {
+ printf("FAIL: failed to create job log directory %s\n", task_dir);
+ exit(1);
+ }
+ free(task_dir);
+}
+
+int main(int argc, char **argv) {
+ LOGFILE = stdout;
+ int my_username = 0;
+
+ // clean up any junk from previous run
+ system("chmod -R u=rwx " TEST_ROOT "; rm -fr " TEST_ROOT);
+
+ if (mkdirs(TEST_ROOT "/logs/userlogs", 0755) != 0) {
+ exit(1);
+ }
+
+ if (write_config_file(TEST_ROOT "/test.cfg") != 0) {
+ exit(1);
+ }
+ read_config(TEST_ROOT "/test.cfg");
+
+ create_tt_roots();
+
+ if (getuid() == 0 && argc == 2) {
+ username = argv[1];
+ } else {
+ username = strdup(getpwuid(getuid())->pw_name);
+ my_username = 1;
+ }
+ set_tasktracker_uid(geteuid(), getegid());
+
+ if (set_user(username)) {
+ exit(1);
+ }
+
+ printf("\nStarting tests\n");
+
+ printf("\nTesting get_user_directory()\n");
+ test_get_user_directory();
+
+ printf("\nTesting get_job_directory()\n");
+ test_get_job_directory();
+
+ printf("\nTesting get_attempt_directory()\n");
+ test_get_attempt_directory();
+
+ printf("\nTesting get_task_launcher_file()\n");
+ test_get_task_launcher_file();
+
+ printf("\nTesting get_job_log_dir()\n");
+ test_get_job_log_dir();
+
+ test_check_configuration_permissions();
+
+ printf("\nTesting get_task_log_dir()\n");
+ test_get_task_log_dir();
+
+ printf("\nTesting delete_task()\n");
+ test_delete_task();
+
+ printf("\nTesting delete_job()\n");
+ test_delete_job();
+
+ test_delete_user();
+
+ test_check_user();
+
+ test_delete_log_directory();
+
+ // the tests that change user need to run in a child process, so that
+ // when they change user they don't give up our privileges
+ run_test_in_child("test_signal_task", test_signal_task);
+ run_test_in_child("test_signal_task_group", test_signal_task_group);
+
+ // init job and run task can't be run if you aren't testing as root
+ if (getuid() == 0) {
+ // these tests do internal forks so that the change_user calls and execs
+ // don't mess up our process.
+ test_init_job();
+ test_run_task();
+ }
+
+ seteuid(0);
+ run("rm -fr " TEST_ROOT);
+ printf("\nFinished tests\n");
+
+ if (my_username) {
+ free(username);
+ }
+ free_configurations();
+ return 0;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java
new file mode 100644
index 0000000..19be39f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public class CMgrCompletedAppsEvent extends ContainerManagerEvent {
+
+ private final List<ApplicationId> appsToCleanup;
+
+ public CMgrCompletedAppsEvent(List<ApplicationId> appsToCleanup) {
+ super(ContainerManagerEventType.FINISH_APPS);
+ this.appsToCleanup = appsToCleanup;
+ }
+
+ public List<ApplicationId> getAppsToCleanup() {
+ return this.appsToCleanup;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedContainersEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedContainersEvent.java
new file mode 100644
index 0000000..28e9632
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedContainersEvent.java
@@ -0,0 +1,37 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class CMgrCompletedContainersEvent extends ContainerManagerEvent {
+
+ private List<ContainerId> containerToCleanup;
+
+ public CMgrCompletedContainersEvent(List<ContainerId> containersToCleanup) {
+ super(ContainerManagerEventType.FINISH_CONTAINERS);
+ this.containerToCleanup = containersToCleanup;
+ }
+
+ public List<ContainerId> getContainersToCleanup() {
+ return this.containerToCleanup;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
new file mode 100644
index 0000000..5e2de2b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -0,0 +1,228 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+
+public abstract class ContainerExecutor implements Configurable {
+
+ private static final Log LOG = LogFactory.getLog(ContainerExecutor.class);
+ final public static FsPermission TASK_LAUNCH_SCRIPT_PERMISSION =
+ FsPermission.createImmutable((short) 0700);
+
+ private Configuration conf;
+ protected ConcurrentMap<ContainerId, ShellCommandExecutor> launchCommandObjs =
+ new ConcurrentHashMap<ContainerId, ShellCommandExecutor>();
+
+ @Override
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+
+ /**
+ * Prepare the environment for containers in this application to execute.
+ * For $x in local.dirs
+ * create $x/$user/$appId
+ * Copy $nmLocal/appTokens -> $x/$user/$appId
+ * For $rsrc in private resources
+ * Copy $rsrc -> $x/$user/filecache/[idef]
+ * For $rsrc in job resources
+ * Copy $rsrc -> $x/$user/$appId/filecache/idef
+ * @param nmPrivateContainerTokens path to localized credentials, rsrc by NM
+ * @param nmAddr RPC address to contact NM
+ * @param user user name of application owner
+ * @param appId id of the application
+ * @param locId id of the localizer
+ * @param localDirs list of NM-local directories to localize into
+ * @throws IOException For most application init failures
+ * @throws InterruptedException If application init thread is halted by NM
+ */
+ public abstract void startLocalizer(Path nmPrivateContainerTokens,
+ InetSocketAddress nmAddr, String user, String appId, String locId,
+ List<Path> localDirs)
+ throws IOException, InterruptedException;
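+
+ // For example (illustrative values): user "alice", app "app_1" and a local
+ // dir "/local-1" yield "/local-1/alice/app_1", with the app tokens copied
+ // beneath it.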
+
+ /**
+ * Launch the container on the node. This is a blocking call and returns only
+ * when the container exits.
+ *
+ * @param container the container to be launched
+ * @param nmPrivateContainerScriptPath the path of the launch script
+ * @param nmPrivateTokensPath the path of the tokens for the container
+ * @param user the user owning the container
+ * @param appId the id of the application the container belongs to
+ * @param containerWorkDir the working directory of the container
+ * @return the exit code of the container
+ */
+ public abstract int launchContainer(Container container,
+ Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath,
+ String user, String appId, Path containerWorkDir) throws IOException;
+
+ public abstract boolean signalContainer(String user, String pid,
+ Signal signal)
+ throws IOException;
+
+ public abstract void deleteAsUser(String user, Path subDir, Path... basedirs)
+ throws IOException, InterruptedException;
+
+ public enum ExitCode {
+ KILLED(137);
+ private final int code;
+
+ private ExitCode(int exitCode) {
+ this.code = exitCode;
+ }
+
+ public int getExitCode() {
+ return code;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(code);
+ }
+ }
+
+ /**
+ * The constants for the signals.
+ */
+ public enum Signal {
+ NULL(0, "NULL"), QUIT(3, "SIGQUIT"),
+ KILL(9, "SIGKILL"), TERM(15, "SIGTERM");
+ private final int value;
+ private final String str;
+ private Signal(int value, String str) {
+ this.str = str;
+ this.value = value;
+ }
+ public int getValue() {
+ return value;
+ }
+ @Override
+ public String toString() {
+ return str;
+ }
+ }
+
+ protected void logOutput(String output) {
+ if (output != null) {
+ for (String line : output.split("\n")) {
+ LOG.info(line);
+ }
+ }
+ }
+
+ /**
+ * Get the process-identifier for the container
+ *
+ * @param containerID the id of the container
+ * @return the process id of the container if it has been launched,
+ * otherwise null
+ */
+ public String getProcessId(ContainerId containerID) {
+ String pid = null;
+ ShellCommandExecutor shExec = launchCommandObjs.get(containerID);
+ if (shExec == null) {
+ // This container isn't even launched yet.
+ return pid;
+ }
+ Process proc = shExec.getProcess();
+ if (proc == null) {
+ // This happens if the command is not yet started
+ return pid;
+ }
+ try {
+ Field pidField = proc.getClass().getDeclaredField("pid");
+ pidField.setAccessible(true);
+ pid = ((Integer) pidField.get(proc)).toString();
+ } catch (SecurityException e) {
+ // SecurityManager not expected with yarn. Ignore.
+ } catch (NoSuchFieldException e) {
+ // Yarn only on UNIX for now. Ignore.
+ } catch (IllegalArgumentException e) {
+ // can't happen: the field is read from the process object itself
+ } catch (IllegalAccessException e) {
+ // shouldn't happen: setAccessible(true) was called above
+ }
+ return pid;
+ }
+
+ public static final boolean isSetsidAvailable = isSetsidSupported();
+ private static boolean isSetsidSupported() {
+ ShellCommandExecutor shexec = null;
+ boolean setsidSupported = true;
+ try {
+ String[] args = {"setsid", "bash", "-c", "echo $$"};
+ shexec = new ShellCommandExecutor(args);
+ shexec.execute();
+ } catch (IOException ioe) {
+ LOG.warn("setsid is not available on this machine. So not using it.");
+ setsidSupported = false;
+ } finally { // log the exit code if the command actually ran
+ if (shexec != null) {
+ LOG.info("setsid exited with exit code " + shexec.getExitCode());
+ }
+ }
+ return setsidSupported;
+ }
+
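+ /**
+ * A helper thread that sleeps for the given delay and then signals the
+ * given pid as the given user through the ContainerExecutor.
+ */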
+ public static class DelayedProcessKiller extends Thread {
+ private final String user;
+ private final String pid;
+ private final long delay;
+ private final Signal signal;
+ private final ContainerExecutor containerExecutor;
+
+ public DelayedProcessKiller(String user, String pid, long delay,
+ Signal signal,
+ ContainerExecutor containerExecutor) {
+ this.user = user;
+ this.pid = pid;
+ this.delay = delay;
+ this.signal = signal;
+ this.containerExecutor = containerExecutor;
+ setName("Task killer for " + pid);
+ setDaemon(false);
+ }
+ @Override
+ public void run() {
+ try {
+ Thread.sleep(delay);
+ containerExecutor.signalContainer(user, pid, signal);
+ } catch (InterruptedException e) {
+ return;
+ } catch (IOException e) {
+ LOG.warn("Exception when killing task " + pid, e);
+ }
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEvent.java
new file mode 100644
index 0000000..454f6ce
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEvent.java
@@ -0,0 +1,29 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class ContainerManagerEvent extends
+ AbstractEvent<ContainerManagerEventType> {
+
+ public ContainerManagerEvent(ContainerManagerEventType type) {
+ super(type);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java
new file mode 100644
index 0000000..e2a84df
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java
@@ -0,0 +1,24 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+public enum ContainerManagerEventType {
+ FINISH_APPS,
+ FINISH_CONTAINERS
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
new file mode 100644
index 0000000..b869729
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
@@ -0,0 +1,40 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+
+/**
+ * Context interface for sharing information across components in the
+ * NodeManager.
+ */
+public interface Context {
+
+ ConcurrentMap<ApplicationId, Application> getApplications();
+
+ ConcurrentMap<ContainerId, Container> getContainers();
+
+ NodeHealthStatus getNodeHealthStatus();
+}
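+// A minimal usage sketch (illustrative only, not part of this patch; the
+// variable names are made up): a component holding a Context can look up
+// state shared across the NodeManager, e.g.
+//
+//   Container container = context.getContainers().get(containerId);
+//   Application app = context.getApplications().get(appId);
+//
+// Both maps are ConcurrentMaps, so lookups need no external locking.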
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
new file mode 100644
index 0000000..46e5f96
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -0,0 +1,413 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell.ExitCodeException;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class DefaultContainerExecutor extends ContainerExecutor {
+
+ private static final Log LOG = LogFactory
+ .getLog(DefaultContainerExecutor.class);
+
+ private final FileContext lfs;
+
+ public DefaultContainerExecutor() {
+ try {
+ this.lfs = FileContext.getLocalFSFileContext();
+ } catch (UnsupportedFileSystemException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ DefaultContainerExecutor(FileContext lfs) {
+ this.lfs = lfs;
+ }
+
+ @Override
+ public void startLocalizer(Path nmPrivateContainerTokensPath,
+ InetSocketAddress nmAddr, String user, String appId, String locId,
+ List<Path> localDirs) throws IOException, InterruptedException {
+
+ ContainerLocalizer localizer =
+ new ContainerLocalizer(this.lfs, user, appId, locId,
+ localDirs, RecordFactoryProvider.getRecordFactory(getConf()));
+
+ createUserLocalDirs(localDirs, user);
+ createUserCacheDirs(localDirs, user);
+ createAppDirs(localDirs, user, appId);
+ createAppLogDirs(appId);
+
+ // TODO: Why pick the first app dir? The same question applies to LCE; why not pick one at random?
+ Path appStorageDir = getFirstApplicationDir(localDirs, user, appId);
+
+ String tokenFn = String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
+ Path tokenDst = new Path(appStorageDir, tokenFn);
+ lfs.util().copy(nmPrivateContainerTokensPath, tokenDst);
+ lfs.setWorkingDirectory(appStorageDir);
+
+ // TODO: Do this over RPC, to mirror how LinuxContainerExecutor runs the localizer?
+ localizer.runLocalization(nmAddr);
+ }
+
+ @Override
+ public int launchContainer(Container container,
+ Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath,
+ String userName, String appId, Path containerWorkDir)
+ throws IOException {
+
+ ContainerId containerId = container.getContainerID();
+
+ // create container dirs on all disks
+ String containerIdStr = ConverterUtils.toString(containerId);
+ String appIdStr =
+ ConverterUtils.toString(container.getContainerID().getAppId());
+ String[] sLocalDirs =
+ getConf().getStrings(NMConfig.NM_LOCAL_DIR, NMConfig.DEFAULT_NM_LOCAL_DIR);
+ for (String sLocalDir : sLocalDirs) {
+ Path usersdir = new Path(sLocalDir, ContainerLocalizer.USERCACHE);
+ Path userdir = new Path(usersdir, userName);
+ Path appCacheDir = new Path(userdir, ContainerLocalizer.APPCACHE);
+ Path appDir = new Path(appCacheDir, appIdStr);
+ Path containerDir = new Path(appDir, containerIdStr);
+ lfs.mkdir(containerDir, null, false);
+ }
+
+ // Create the container log-dirs on all disks
+ createContainerLogDirs(appIdStr, containerIdStr);
+
+ // copy launch script to work dir
+ Path launchDst =
+ new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
+ lfs.util().copy(nmPrivateContainerScriptPath, launchDst);
+
+ // copy container tokens to work dir
+ Path tokenDst =
+ new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE);
+ lfs.util().copy(nmPrivateTokensPath, tokenDst);
+
+ // create log dir under app
+ // fork script
+ ShellCommandExecutor shExec = null;
+ try {
+ lfs.setPermission(launchDst,
+ ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
+ String[] command =
+ new String[] { "bash", "-c", launchDst.toUri().getPath().toString() };
+ LOG.info("launchContainer: " + Arrays.toString(command));
+ shExec = new ShellCommandExecutor(command,
+ new File(containerWorkDir.toUri().getPath()));
+ launchCommandObjs.put(containerId, shExec);
+ shExec.execute();
+ } catch (IOException e) {
+ if (null == shExec) {
+ return -1;
+ }
+ int exitCode = shExec.getExitCode();
+ LOG.warn("Exit code from task is : " + exitCode);
+ String message = shExec.getOutput();
+ logOutput(message);
+ container.handle(new ContainerDiagnosticsUpdateEvent(containerId,
+ message));
+ return exitCode;
+ } finally {
+ launchCommandObjs.remove(containerId);
+ }
+ return 0;
+ }
+
+ @Override
+ public boolean signalContainer(String user, String pid, Signal signal)
+ throws IOException {
+ final String sigpid = ContainerExecutor.isSetsidAvailable
+ ? "-" + pid
+ : pid;
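+ // Probe first with the NULL signal to verify the process (group) exists
+ // before delivering the real signal.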
+ try {
+ sendSignal(sigpid, Signal.NULL);
+ } catch (ExitCodeException e) {
+ return false;
+ }
+ try {
+ sendSignal(sigpid, signal);
+ } catch (IOException e) {
+ try {
+ sendSignal(sigpid, Signal.NULL);
+ } catch (IOException ignore) {
+ return false;
+ }
+ throw e;
+ }
+ return true;
+ }
+
+ /**
+ * Send a specified signal to the specified pid.
+ *
+ * @param pid the pid of the process [group] to signal.
+ * @param signal the signal to send.
+ */
+ protected void sendSignal(String pid, Signal signal) throws IOException {
+ String[] arg = { "kill", "-" + signal.getValue(), pid };
+ ShellCommandExecutor shexec = new ShellCommandExecutor(arg);
+ shexec.execute();
+ }
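+
+ // Illustrative note: signalContainer prefixes the pid with "-" when setsid
+ // is available, so e.g. signalContainer(user, "4567", Signal.TERM) ends up
+ // running "kill -15 -4567", signalling the whole process group rather than
+ // just the leader pid.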
+
+ @Override
+ public void deleteAsUser(String user, Path subDir, Path... baseDirs)
+ throws IOException, InterruptedException {
+ if (baseDirs == null || baseDirs.length == 0) {
+ LOG.info("Deleting absolute path : " + subDir);
+ lfs.delete(subDir, true);
+ return;
+ }
+ for (Path baseDir : baseDirs) {
+ Path del = subDir == null ? baseDir : new Path(baseDir, subDir);
+ LOG.info("Deleting path : " + del);
+ lfs.delete(del, true);
+ }
+ }
+
+ /** Permissions for the user dir.
+ * $local.dir/usercache/$user */
+ private static final short USER_PERM = (short)0750;
+ /** Permissions for the user appcache dir.
+ * $local.dir/usercache/$user/appcache */
+ private static final short APPCACHE_PERM = (short)0710;
+ /** Permissions for the user filecache dir.
+ * $local.dir/usercache/$user/filecache */
+ private static final short FILECACHE_PERM = (short)0710;
+ /** Permissions for the user app dir.
+ * $local.dir/usercache/$user/appcache/$appId */
+ private static final short APPDIR_PERM = (short)0710;
+ /** Permissions for the application log dir.
+ * $logdir/$appId */
+ private static final short LOGDIR_PERM = (short)0710;
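+
+ // Resulting per-disk layout (sketch, consolidating the javadoc above):
+ //   $local.dir/usercache/$user                    0750
+ //   $local.dir/usercache/$user/appcache           0710
+ //   $local.dir/usercache/$user/filecache          0710
+ //   $local.dir/usercache/$user/appcache/$appId    0710
+ //   $logdir/$appId                                0710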
+
+ private Path getFirstApplicationDir(List<Path> localDirs, String user,
+ String appId) {
+ return getApplicationDir(localDirs.get(0), user, appId);
+ }
+
+ private Path getApplicationDir(Path base, String user, String appId) {
+ return new Path(getAppcacheDir(base, user), appId);
+ }
+
+ private Path getUserCacheDir(Path base, String user) {
+ return new Path(new Path(base, ContainerLocalizer.USERCACHE), user);
+ }
+
+ private Path getAppcacheDir(Path base, String user) {
+ return new Path(getUserCacheDir(base, user),
+ ContainerLocalizer.APPCACHE);
+ }
+
+ private Path getFileCacheDir(Path base, String user) {
+ return new Path(getUserCacheDir(base, user),
+ ContainerLocalizer.FILECACHE);
+ }
+
+ /**
+ * Initialize the local directories for a particular user.
+ * <ul>
+ * <li>$local.dir/usercache/$user</li>
+ * </ul>
+ */
+ private void createUserLocalDirs(List<Path> localDirs, String user)
+ throws IOException {
+ boolean userDirStatus = false;
+ FsPermission userperms = new FsPermission(USER_PERM);
+ for (Path localDir : localDirs) {
+ // create $local.dir/usercache/$user and its immediate parent
+ try {
+ lfs.mkdir(getUserCacheDir(localDir, user), userperms, true);
+ } catch (IOException e) {
+ LOG.warn("Unable to create the user directory : " + localDir, e);
+ continue;
+ }
+ userDirStatus = true;
+ }
+ if (!userDirStatus) {
+ throw new IOException("Not able to initialize user directories "
+ + "in any of the configured local directories for user " + user);
+ }
+ }
+
+
+ /**
+ * Initialize the local cache directories for a particular user.
+ * <ul>
+ * <li>$local.dir/usercache/$user</li>
+ * <li>$local.dir/usercache/$user/appcache</li>
+ * <li>$local.dir/usercache/$user/filecache</li>
+ * </ul>
+ */
+ private void createUserCacheDirs(List<Path> localDirs, String user)
+ throws IOException {
+ LOG.info("Initializing user " + user);
+
+ boolean appcacheDirStatus = false;
+ boolean distributedCacheDirStatus = false;
+ FsPermission appCachePerms = new FsPermission(APPCACHE_PERM);
+ FsPermission fileperms = new FsPermission(FILECACHE_PERM);
+
+ for (Path localDir : localDirs) {
+ // create $local.dir/usercache/$user/appcache
+ final Path appDir = getAppcacheDir(localDir, user);
+ try {
+ lfs.mkdir(appDir, appCachePerms, true);
+ appcacheDirStatus = true;
+ } catch (IOException e) {
+ LOG.warn("Unable to create app cache directory : " + appDir, e);
+ }
+ // create $local.dir/usercache/$user/filecache
+ final Path distDir = getFileCacheDir(localDir, user);
+ try {
+ lfs.mkdir(distDir, fileperms, true);
+ distributedCacheDirStatus = true;
+ } catch (IOException e) {
+ LOG.warn("Unable to create file cache directory : " + distDir, e);
+ }
+ }
+ if (!appcacheDirStatus) {
+ throw new IOException("Not able to initialize app-cache directories "
+ + "in any of the configured local directories for user " + user);
+ }
+ if (!distributedCacheDirStatus) {
+ throw new IOException(
+ "Not able to initialize distributed-cache directories "
+ + "in any of the configured local directories for user "
+ + user);
+ }
+ }
+
+ /**
+ * Initialize the local directories for a particular application.
+ * <ul>
+ * <li>$local.dir/usercache/$user/appcache/$appid</li>
+ * </ul>
+ * @param localDirs the configured NM local directories
+ * @param user the user owning the application
+ * @param appId the application id
+ */
+ private void createAppDirs(List<Path> localDirs, String user, String appId)
+ throws IOException {
+ boolean initAppDirStatus = false;
+ FsPermission appperms = new FsPermission(APPDIR_PERM);
+ for (Path localDir : localDirs) {
+ Path fullAppDir = getApplicationDir(localDir, user, appId);
+ if (lfs.util().exists(fullAppDir)) {
+ // this will happen on a partial execution of localizeJob. Sometimes
+ // copying job.xml to the local disk succeeds but copying job.jar might
+ // throw out an exception. We should clean up and then try again.
+ lfs.delete(fullAppDir, true);
+ }
+ // create $local.dir/usercache/$user/appcache/$appId
+ try {
+ lfs.mkdir(fullAppDir, appperms, true);
+ initAppDirStatus = true;
+ } catch (IOException e) {
+ LOG.warn("Unable to create app directory " + fullAppDir.toString(), e);
+ }
+ }
+ if (!initAppDirStatus) {
+ throw new IOException("Not able to initialize app directories "
+ + "in any of the configured local directories for app "
+ + appId.toString());
+ }
+ }
+
+ /**
+ * Create application log directories on all disks.
+ */
+ private void createAppLogDirs(String appId)
+ throws IOException {
+ String[] rootLogDirs =
+ getConf()
+ .getStrings(NMConfig.NM_LOG_DIR, NMConfig.DEFAULT_NM_LOG_DIR);
+
+ boolean appLogDirStatus = false;
+ FsPermission appLogDirPerms = new FsPermission(LOGDIR_PERM);
+ for (String rootLogDir : rootLogDirs) {
+ // create $log.dir/$appid
+ Path appLogDir = new Path(rootLogDir, appId);
+ try {
+ lfs.mkdir(appLogDir, appLogDirPerms, true);
+ } catch (IOException e) {
+ LOG.warn("Unable to create the app-log directory : " + appLogDir, e);
+ continue;
+ }
+ appLogDirStatus = true;
+ }
+ if (!appLogDirStatus) {
+ throw new IOException("Not able to initialize app-log directories "
+ + "in any of the configured local directories for app " + appId);
+ }
+ }
+
+ /**
+ * Create container log directories on all disks.
+ */
+ private void createContainerLogDirs(String appId, String containerId)
+ throws IOException {
+ String[] rootLogDirs =
+ getConf()
+ .getStrings(NMConfig.NM_LOG_DIR, NMConfig.DEFAULT_NM_LOG_DIR);
+
+ boolean containerLogDirStatus = false;
+ FsPermission containerLogDirPerms = new FsPermission(LOGDIR_PERM);
+ for (String rootLogDir : rootLogDirs) {
+ // create $log.dir/$appid/$containerid
+ Path appLogDir = new Path(rootLogDir, appId);
+ Path containerLogDir = new Path(appLogDir, containerId);
+ try {
+ lfs.mkdir(containerLogDir, containerLogDirPerms, true);
+ } catch (IOException e) {
+ LOG.warn("Unable to create the container-log directory : "
+ + appLogDir, e);
+ continue;
+ }
+ containerLogDirStatus = true;
+ }
+ if (!containerLogDirStatus) {
+ throw new IOException(
+ "Not able to initialize container-log directories "
+ + "in any of the configured local directories for container "
+ + containerId);
+ }
+ }
+}
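+// Configuration note (illustrative, not part of this patch): the NodeManager
+// uses this executor unless another implementation, such as
+// LinuxContainerExecutor, is configured via the key
+//   yarn.server.nodemanager.container-executor.class
+// (NMConfig.NM_CONTAINER_EXECUTOR_CLASS, read in NodeManager.init()).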
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
new file mode 100644
index 0000000..f62a5cb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -0,0 +1,140 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.IOException;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import static java.util.concurrent.TimeUnit.*;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.*;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class DeletionService extends AbstractService {
+
+ static final Log LOG = LogFactory.getLog(DeletionService.class);
+ /** Delay, in seconds, before deleting a resource, to ease debugging of NM issues */
+ static final String DEBUG_DELAY_SEC =
+ NMConfig.NM_PREFIX + "debug.delete.delay";
+
+ private int debugDelay;
+ private final ContainerExecutor exec;
+ private ScheduledThreadPoolExecutor sched;
+ private final FileContext lfs = getLfs();
+ static final FileContext getLfs() {
+ try {
+ return FileContext.getLocalFSFileContext();
+ } catch (UnsupportedFileSystemException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public DeletionService(ContainerExecutor exec) {
+ super(DeletionService.class.getName());
+ this.exec = exec;
+ this.debugDelay = 0;
+ }
+
+ /**
+ * Delete the path(s) as this user.
+ * @param user The user to delete as, or the JVM user if null
+ * @param subDir the sub-directory to delete under each base dir, or the
+ * absolute path to delete when no base dirs are given
+ * @param baseDirs the base directories that contain subDir
+ */
+ public void delete(String user, Path subDir, Path... baseDirs) {
+ // TODO if parent owned by NM, rename within parent inline
+ sched.schedule(new FileDeletion(user, subDir, baseDirs),
+ debugDelay, TimeUnit.SECONDS);
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ if (conf != null) {
+ sched = new ScheduledThreadPoolExecutor(
+ conf.getInt(NM_MAX_DELETE_THREADS, DEFAULT_MAX_DELETE_THREADS));
+ debugDelay = conf.getInt(DEBUG_DELAY_SEC, 0);
+ } else {
+ sched = new ScheduledThreadPoolExecutor(DEFAULT_MAX_DELETE_THREADS);
+ }
+ sched.setKeepAliveTime(60L, SECONDS);
+ super.init(conf);
+ }
+
+ @Override
+ public void stop() {
+ sched.shutdown();
+ try {
+ sched.awaitTermination(10, SECONDS);
+ } catch (InterruptedException e) {
+ sched.shutdownNow();
+ }
+ super.stop();
+ }
+
+ private class FileDeletion implements Runnable {
+ final String user;
+ final Path subDir;
+ final Path[] baseDirs;
+ FileDeletion(String user, Path subDir, Path[] baseDirs) {
+ this.user = user;
+ this.subDir = subDir;
+ this.baseDirs = baseDirs;
+ }
+ @Override
+ public void run() {
+ if (null == user) {
+ if (baseDirs == null || baseDirs.length == 0) {
+ LOG.debug("NM deleting absolute path : " + subDir);
+ try {
+ lfs.delete(subDir, true);
+ } catch (IOException e) {
+ LOG.warn("Failed to delete " + subDir);
+ }
+ return;
+ }
+ for (Path baseDir : baseDirs) {
+ Path del = subDir == null ? baseDir : new Path(baseDir, subDir);
+ LOG.debug("NM deleting path : " + del);
+ try {
+ lfs.delete(del, true);
+ } catch (IOException e) {
+ LOG.warn("Failed to delete " + del, e);
+ }
+ }
+ } else {
+ try {
+ exec.deleteAsUser(user, subDir, baseDirs);
+ } catch (IOException e) {
+ LOG.warn("Failed to delete as user " + user, e);
+ } catch (InterruptedException e) {
+ LOG.warn("Failed to delete as user " + user, e);
+ }
+ }
+ }
+ }
+}
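+// Usage sketch (illustrative only; the variable names are made up): callers
+// enqueue paths and return immediately, while deletion runs on the pool
+// threads, optionally delayed by yarn.server.nodemanager.debug.delete.delay
+// seconds:
+//
+//   del.delete(user, containerWorkDir);     // delete as the application user
+//   del.delete(null, logSubDir, rootDirs);  // delete as the NM's own user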
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
new file mode 100644
index 0000000..56361cb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -0,0 +1,269 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Shell.ExitCodeException;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class LinuxContainerExecutor extends ContainerExecutor {
+
+ private static final Log LOG = LogFactory
+ .getLog(LinuxContainerExecutor.class);
+
+ private String containerExecutorExe;
+ protected static final String CONTAINER_EXECUTOR_EXEC_KEY =
+ NMConfig.NM_PREFIX + "linux-container-executor.path";
+
+ @Override
+ public void setConf(Configuration conf) {
+ super.setConf(conf);
+ containerExecutorExe = getContainerExecutorExecutablePath(conf);
+ }
+
+ /**
+ * List of commands that the setuid executable (container-executor) will
+ * run, identified by the integer command code passed on its command line.
+ */
+ enum Commands {
+ INITIALIZE_JOB(0),
+ LAUNCH_CONTAINER(1),
+ SIGNAL_CONTAINER(2),
+ DELETE_AS_USER(3),
+ DELETE_LOG_AS_USER(4);
+
+ private int value;
+ Commands(int value) {
+ this.value = value;
+ }
+ int getValue() {
+ return value;
+ }
+ }
+
+ /**
+ * Result codes returned from the C container-executor.
+ * These must match the values in container-executor.h.
+ */
+ enum ResultCode {
+ OK(0),
+ INVALID_USER_NAME(2),
+ INVALID_TASK_PID(9),
+ INVALID_TASKCONTROLLER_PERMISSIONS(22),
+ INVALID_CONFIG_FILE(24);
+
+ private final int value;
+ ResultCode(int value) {
+ this.value = value;
+ }
+ int getValue() {
+ return value;
+ }
+ }
+
+ protected String getContainerExecutorExecutablePath(Configuration conf) {
+ File hadoopBin = new File(System.getenv("YARN_HOME"), "bin");
+ String defaultPath =
+ new File(hadoopBin, "container-executor").getAbsolutePath();
+ return null == conf
+ ? defaultPath
+ : conf.get(CONTAINER_EXECUTOR_EXEC_KEY, defaultPath);
+ }
+
+ @Override
+ public void startLocalizer(Path nmPrivateContainerTokensPath,
+ InetSocketAddress nmAddr, String user, String appId, String locId,
+ List<Path> localDirs) throws IOException, InterruptedException {
+ List<String> command = new ArrayList<String>(
+ Arrays.asList(containerExecutorExe,
+ user,
+ Integer.toString(Commands.INITIALIZE_JOB.getValue()),
+ appId,
+ nmPrivateContainerTokensPath.toUri().getPath().toString()));
+ File jvm = // use same jvm as parent
+ new File(new File(System.getProperty("java.home"), "bin"), "java");
+ command.add(jvm.toString());
+ command.add("-classpath");
+ command.add(System.getProperty("java.class.path"));
+ command.add(ContainerLocalizer.class.getName());
+ command.add(user);
+ command.add(appId);
+ command.add(locId);
+ command.add(nmAddr.getHostName());
+ command.add(Integer.toString(nmAddr.getPort()));
+ for (Path p : localDirs) {
+ command.add(p.toUri().getPath().toString());
+ }
+ String[] commandArray = command.toArray(new String[command.size()]);
+ ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("initApplication: " + Arrays.toString(commandArray));
+ }
+ try {
+ shExec.execute();
+ if (LOG.isDebugEnabled()) {
+ logOutput(shExec.getOutput());
+ }
+ } catch (ExitCodeException e) {
+ int exitCode = shExec.getExitCode();
+ LOG.warn("Exit code from container is : " + exitCode);
+ logOutput(shExec.getOutput());
+ throw new IOException("App initialization failed (" + exitCode + ")", e);
+ }
+ }
+
+ @Override
+ public int launchContainer(Container container,
+ Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath,
+ String user, String appId, Path containerWorkDir) throws IOException {
+
+ ContainerId containerId = container.getContainerID();
+ String containerIdStr = ConverterUtils.toString(containerId);
+ List<String> command = new ArrayList<String>(
+ Arrays.asList(containerExecutorExe,
+ user,
+ Integer.toString(Commands.LAUNCH_CONTAINER.getValue()),
+ appId,
+ containerIdStr,
+ containerWorkDir.toString(),
+ nmPrivateContainerScriptPath.toUri().getPath().toString(),
+ nmPrivateTokensPath.toUri().getPath().toString()));
+ String[] commandArray = command.toArray(new String[command.size()]);
+ ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray);
+ launchCommandObjs.put(containerId, shExec);
+ LOG.info("launchContainer: " + Arrays.toString(commandArray));
+ String output = null;
+ try {
+ shExec.execute();
+ // Read the output only after the command has actually run.
+ output = shExec.getOutput();
+ if (LOG.isDebugEnabled()) {
+ logOutput(output);
+ }
+ } catch (ExitCodeException e) {
+ // Capture whatever the executor emitted before failing.
+ output = shExec.getOutput();
+ int exitCode = shExec.getExitCode();
+ LOG.warn("Exit code from container is : " + exitCode);
+ // Exit codes 143 (SIGTERM) and 137 (SIGKILL) mean the container was
+ // terminated/killed forcefully. In all other cases, log the
+ // container-executor's output.
+ if (exitCode != 143 && exitCode != 137) {
+ LOG.warn("Exception from container-launch : ", e);
+ logOutput(output);
+ String diagnostics = "Exception from container-launch: \n"
+ + StringUtils.stringifyException(e) + "\n" + output;
+ container.handle(new ContainerDiagnosticsUpdateEvent(containerId,
+ diagnostics));
+ } else {
+ container.handle(new ContainerDiagnosticsUpdateEvent(containerId,
+ "Container killed on request. Exit code is " + exitCode));
+ }
+ return exitCode;
+ } finally {
+ launchCommandObjs.remove(containerId);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Output from LinuxContainerExecutor's launchContainer follows:");
+ logOutput(output);
+ }
+ return 0;
+ }
+
+ @Override
+ public boolean signalContainer(String user, String pid, Signal signal)
+ throws IOException {
+
+ String[] command =
+ new String[] { containerExecutorExe,
+ user,
+ Integer.toString(Commands.SIGNAL_CONTAINER.getValue()),
+ pid,
+ Integer.toString(signal.getValue()) };
+ ShellCommandExecutor shExec = new ShellCommandExecutor(command);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("signalContainer: " + Arrays.toString(command));
+ }
+ try {
+ shExec.execute();
+ } catch (ExitCodeException e) {
+ int retCode = shExec.getExitCode();
+ if (retCode == ResultCode.INVALID_TASK_PID.getValue()) {
+ return false;
+ }
+ logOutput(shExec.getOutput());
+ throw new IOException("Problem signalling container " + pid + " with " +
+ signal + "; exit = " + retCode);
+ }
+ return true;
+ }
+
+ @Override
+ public void deleteAsUser(String user, Path dir, Path... baseDirs) {
+ List<String> command = new ArrayList<String>(
+ Arrays.asList(containerExecutorExe,
+ user,
+ Integer.toString(Commands.DELETE_AS_USER.getValue()),
+ dir == null ? "" : dir.toUri().getPath()));
+ if (baseDirs == null || baseDirs.length == 0) {
+ LOG.info("Deleting absolute path : " + dir);
+ } else {
+ for (Path baseDir : baseDirs) {
+ Path del = dir == null ? baseDir : new Path(baseDir, dir);
+ LOG.info("Deleting path : " + del);
+ command.add(baseDir.toUri().getPath());
+ }
+ }
+ String[] commandArray = command.toArray(new String[command.size()]);
+ ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray);
+ LOG.info(" -- DEBUG -- deleteAsUser: " + Arrays.toString(commandArray));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("deleteAsUser: " + Arrays.toString(commandArray));
+ }
+ try {
+ shExec.execute();
+ if (LOG.isDebugEnabled()) {
+ logOutput(shExec.getOutput());
+ }
+ } catch (IOException e) {
+ int exitCode = shExec.getExitCode();
+ LOG.warn("Exit code from container is : " + exitCode);
+ if (exitCode != 0) {
+ LOG.error("DeleteAsUser for " + dir.toUri().getPath()
+ + " returned with non-zero exit code" + exitCode);
+ LOG.error("Output from LinuxContainerExecutor's deleteAsUser follows:");
+ logOutput(shExec.getOutput());
+ }
+ }
+ }
+}
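+// Invocation sketch (illustrative; the placeholders are not literal values):
+// launching a container for user "alice" execs the setuid helper roughly as
+//   container-executor alice 1 <appId> <containerId> <workDir> \
+//       <nmPrivateScript> <nmPrivateTokens>
+// where 1 is Commands.LAUNCH_CONTAINER.getValue().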
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMConfig.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMConfig.java
new file mode 100644
index 0000000..77bb1a8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMConfig.java
@@ -0,0 +1,103 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+/** This class stores all the configuration constant keys
+ * for the NodeManager. Every configuration key variable
+ * used in the NodeManager should be declared here, so that
+ * all the configuration parameters can be seen in one place.
+ */
+public class NMConfig {
+ public static final String NM_PREFIX = "yarn.server.nodemanager.";
+
+ public static final String DEFAULT_NM_BIND_ADDRESS = "0.0.0.0:45454";
+
+ /** host:port address for the NM to bind to **/
+ public static final String NM_BIND_ADDRESS = NM_PREFIX + "address";
+
+ public static final String DEFAULT_NM_HTTP_BIND_ADDRESS = "0.0.0.0:9999";
+
+ /** host:port address for the webserver to bind to **/
+ public static final String NM_HTTP_BIND_ADDRESS = NM_PREFIX + "http-address";
+
+ public static final String DEFAULT_NM_LOCALIZER_BIND_ADDRESS = "0.0.0.0:4344";
+
+ public static final String NM_LOCALIZER_BIND_ADDRESS =
+ NM_PREFIX + "localizer.address";
+
+ public static final String NM_KEYTAB = NM_PREFIX + "keytab";
+
+ public static final String NM_CONTAINER_EXECUTOR_CLASS = NM_PREFIX
+ + "container-executor.class";
+
+ public static final String NM_LOCAL_DIR = NM_PREFIX + "local-dir";
+
+ public static final String DEFAULT_NM_LOCAL_DIR = "/tmp/nm-local-dir";
+
+ public static final String NM_LOG_DIR = NM_PREFIX + "log.dir"; // TODO: Rename
+
+ public static final String DEFAULT_NM_LOG_DIR = "/tmp/logs";
+
+ public static final String REMOTE_USER_LOG_DIR = NM_PREFIX
+ + "remote-app-log-dir";
+
+ public static final String DEFAULT_REMOTE_APP_LOG_DIR = "/tmp/logs";
+
+ public static final int DEFAULT_NM_VMEM_GB = 8;
+
+ public static final String NM_VMEM_GB = NM_PREFIX + "resource.memory.gb";
+
+ // TODO: Should this instead be dictated by RM?
+ public static final String HEARTBEAT_INTERVAL = NM_PREFIX
+ + "heartbeat-interval";
+
+ public static final int DEFAULT_HEARTBEAT_INTERVAL = 1000;
+
+ public static final String NM_MAX_DELETE_THREADS = NM_PREFIX +
+ "max.delete.threads";
+
+ public static final int DEFAULT_MAX_DELETE_THREADS = 4;
+
+ public static final String NM_MAX_PUBLIC_FETCH_THREADS = NM_PREFIX +
+ "max.public.fetch.threads";
+
+ public static final int DEFAULT_MAX_PUBLIC_FETCH_THREADS = 4;
+
+ public static final String NM_LOCALIZATION_THREADS =
+ NM_PREFIX + "localiation.threads";
+
+ public static final int DEFAULT_NM_LOCALIZATION_THREADS = 5;
+
+ public static final String NM_CONTAINER_MGR_THREADS =
+ NM_PREFIX + "container.manager.threads";
+
+ public static final int DEFAULT_NM_CONTAINER_MGR_THREADS = 5;
+
+ public static final String NM_TARGET_CACHE_MB =
+ NM_PREFIX + "target.cache.size";
+
+ public static final long DEFAULT_NM_TARGET_CACHE_MB = 10 * 1024;
+
+ public static final String NM_CACHE_CLEANUP_MS =
+ NM_PREFIX + "target.cache.cleanup.period.ms";
+
+ public static final long DEFAULT_NM_CACHE_CLEANUP_MS = 10 * 60 * 1000;
+
+}
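+// Usage sketch (illustrative): components read these keys from their service
+// Configuration together with the matching defaults, e.g.
+//
+//   int memoryGB = conf.getInt(NMConfig.NM_VMEM_GB,
+//       NMConfig.DEFAULT_NM_VMEM_GB);
+//   String bindAddr = conf.get(NMConfig.NM_BIND_ADDRESS,
+//       NMConfig.DEFAULT_NM_BIND_ADDRESS);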
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
new file mode 100644
index 0000000..d02c30d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -0,0 +1,194 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_CONTAINER_EXECUTOR_CLASS;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_KEYTAB;
+
+import java.io.IOException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+import org.apache.hadoop.NodeHealthCheckerService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.YarnServerConfig;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer;
+import org.apache.hadoop.yarn.service.CompositeService;
+import org.apache.hadoop.yarn.service.Service;
+
+public class NodeManager extends CompositeService {
+ protected final NodeManagerMetrics metrics = NodeManagerMetrics.create();
+
+ public NodeManager() {
+ super(NodeManager.class.getName());
+ }
+
+ protected NodeStatusUpdater createNodeStatusUpdater(Context context,
+ Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
+ return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker,
+ metrics);
+ }
+
+ protected NodeResourceMonitor createNodeResourceMonitor() {
+ return new NodeResourceMonitorImpl();
+ }
+
+ protected ContainerManagerImpl createContainerManager(Context context,
+ ContainerExecutor exec, DeletionService del,
+ NodeStatusUpdater nodeStatusUpdater) {
+ return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
+ metrics);
+ }
+
+ protected WebServer createWebServer(Context nmContext,
+ ResourceView resourceView) {
+ return new WebServer(nmContext, resourceView);
+ }
+
+ protected void doSecureLogin() throws IOException {
+ SecurityUtil.login(getConfig(), NM_KEYTAB,
+ YarnServerConfig.NM_SERVER_PRINCIPAL_KEY);
+ }
+
+ @Override
+ public void init(Configuration conf) {
+
+ Context context = new NMContext();
+
+ ContainerExecutor exec = ReflectionUtils.newInstance(
+ conf.getClass(NM_CONTAINER_EXECUTOR_CLASS,
+ DefaultContainerExecutor.class, ContainerExecutor.class), conf);
+ DeletionService del = new DeletionService(exec);
+ addService(del);
+
+ // NodeManager level dispatcher
+ AsyncDispatcher dispatcher = new AsyncDispatcher();
+
+ NodeHealthCheckerService healthChecker = null;
+ if (NodeHealthCheckerService.shouldRun(conf)) {
+ healthChecker = new NodeHealthCheckerService();
+ addService(healthChecker);
+ }
+
+ // StatusUpdater should be added first so that it starts first. Only after
+ // it contacts the RM, registers, and gets the tokens can the
+ // ContainerManager start.
+ NodeStatusUpdater nodeStatusUpdater =
+ createNodeStatusUpdater(context, dispatcher, healthChecker);
+ addService(nodeStatusUpdater);
+
+ NodeResourceMonitor nodeResourceMonitor = createNodeResourceMonitor();
+ addService(nodeResourceMonitor);
+
+ ContainerManagerImpl containerManager =
+ createContainerManager(context, exec, del, nodeStatusUpdater);
+ addService(containerManager);
+
+ Service webServer =
+ createWebServer(context, containerManager.getContainersMonitor());
+ addService(webServer);
+
+ dispatcher.register(ContainerManagerEventType.class, containerManager);
+ addService(dispatcher);
+
+ Runtime.getRuntime().addShutdownHook(new Thread() {
+ @Override
+ public void run() {
+ NodeManager.this.stop();
+ }
+ });
+
+ DefaultMetricsSystem.initialize("NodeManager");
+
+ super.init(conf);
+ // TODO add local dirs to del
+ }
+
+ @Override
+ public void start() {
+ try {
+ doSecureLogin();
+ } catch (IOException e) {
+ throw new YarnException("Failed NodeManager login", e);
+ }
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ super.stop();
+ DefaultMetricsSystem.shutdown();
+ }
+
+ public static class NMContext implements Context {
+
+ private final ConcurrentMap<ApplicationId, Application> applications =
+ new ConcurrentHashMap<ApplicationId, Application>();
+ private final ConcurrentMap<ContainerId, Container> containers =
+ new ConcurrentSkipListMap<ContainerId, Container>();
+
+ private final NodeHealthStatus nodeHealthStatus = RecordFactoryProvider
+ .getRecordFactory(null).newRecordInstance(NodeHealthStatus.class);
+
+ public NMContext() {
+ this.nodeHealthStatus.setIsNodeHealthy(true);
+ this.nodeHealthStatus.setHealthReport("Healthy");
+ this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis());
+ }
+
+ @Override
+ public ConcurrentMap<ApplicationId, Application> getApplications() {
+ return this.applications;
+ }
+
+ @Override
+ public ConcurrentMap<ContainerId, Container> getContainers() {
+ return this.containers;
+ }
+
+ @Override
+ public NodeHealthStatus getNodeHealthStatus() {
+ return this.nodeHealthStatus;
+ }
+ }
+
+ public static void main(String[] args) {
+ NodeManager nodeManager = new NodeManager();
+ YarnConfiguration conf = new YarnConfiguration();
+ nodeManager.init(conf);
+ nodeManager.start();
+ }
+
+}
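+// Extension sketch (illustrative; TestNodeManager and MyStatusUpdater are
+// made-up names): the protected create* factories let tests and subclasses
+// swap components without touching init(), e.g.
+//
+//   class TestNodeManager extends NodeManager {
+//     @Override
+//     protected NodeStatusUpdater createNodeStatusUpdater(Context context,
+//         Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
+//       return new MyStatusUpdater(context, dispatcher, healthChecker);
+//     }
+//   }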
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitor.java
new file mode 100644
index 0000000..219bb6e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitor.java
@@ -0,0 +1,25 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.yarn.service.Service;
+
+public interface NodeResourceMonitor extends Service {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
new file mode 100644
index 0000000..1e394c4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
@@ -0,0 +1,30 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.yarn.service.AbstractService;
+
+public class NodeResourceMonitorImpl extends AbstractService implements
+ NodeResourceMonitor {
+
+ public NodeResourceMonitorImpl() {
+ super(NodeResourceMonitorImpl.class.getName());
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java
new file mode 100644
index 0000000..caf3a72
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java
@@ -0,0 +1,31 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.yarn.service.Service;
+
+public interface NodeStatusUpdater extends Service {
+
+ byte[] getRMNMSharedSecret();
+
+ String getContainerManagerBindAddress();
+
+ void sendOutofBandHeartBeat();
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
new file mode 100644
index 0000000..3ae0121
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -0,0 +1,290 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.avro.AvroRuntimeException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.NodeHealthCheckerService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.server.RMNMSecurityInfoClass;
+import org.apache.hadoop.yarn.server.YarnServerConfig;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.Records;
+
+public class NodeStatusUpdaterImpl extends AbstractService implements
+ NodeStatusUpdater {
+
+ private static final Log LOG = LogFactory.getLog(NodeStatusUpdaterImpl.class);
+
+ private final Object heartbeatMonitor = new Object();
+
+ private final Context context;
+ private final Dispatcher dispatcher;
+
+ private long heartBeatInterval;
+ private ResourceTracker resourceTracker;
+ private String rmAddress;
+ private Resource totalResource;
+ private String containerManagerBindAddress;
+ private String nodeHttpAddress;
+ private String hostName;
+ private int containerManagerPort;
+ private int httpPort;
+ private NodeId nodeId;
+ private byte[] secretKeyBytes = new byte[0];
+ private boolean isStopped;
+ private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ private final NodeHealthCheckerService healthChecker;
+ private final NodeManagerMetrics metrics;
+
+ public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher,
+ NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
+ super(NodeStatusUpdaterImpl.class.getName());
+ this.healthChecker = healthChecker;
+ this.context = context;
+ this.dispatcher = dispatcher;
+ this.metrics = metrics;
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+ this.rmAddress =
+ conf.get(YarnServerConfig.RESOURCETRACKER_ADDRESS,
+ YarnServerConfig.DEFAULT_RESOURCETRACKER_BIND_ADDRESS);
+ this.heartBeatInterval =
+ conf.getLong(NMConfig.HEARTBEAT_INTERVAL,
+ NMConfig.DEFAULT_HEARTBEAT_INTERVAL);
+ int memory = conf.getInt(NMConfig.NM_VMEM_GB, NMConfig.DEFAULT_NM_VMEM_GB);
+ this.totalResource = recordFactory.newRecordInstance(Resource.class);
+ this.totalResource.setMemory(memory * 1024);
+ metrics.addResource(totalResource);
+ super.init(conf);
+ }
+
+ @Override
+ public void start() {
+ String cmBindAddressStr =
+ getConfig().get(NMConfig.NM_BIND_ADDRESS,
+ NMConfig.DEFAULT_NM_BIND_ADDRESS);
+ InetSocketAddress cmBindAddress =
+ NetUtils.createSocketAddr(cmBindAddressStr);
+ String httpBindAddressStr =
+ getConfig().get(NMConfig.NM_HTTP_BIND_ADDRESS,
+ NMConfig.DEFAULT_NM_HTTP_BIND_ADDRESS);
+ InetSocketAddress httpBindAddress =
+ NetUtils.createSocketAddr(httpBindAddressStr);
+ try {
+ this.hostName = InetAddress.getLocalHost().getHostAddress();
+ this.containerManagerPort = cmBindAddress.getPort();
+ this.httpPort = httpBindAddress.getPort();
+ this.containerManagerBindAddress =
+ this.hostName + ":" + this.containerManagerPort;
+ this.nodeHttpAddress = this.hostName + ":" + this.httpPort;
+ LOG.info("Configured ContainerManager Address is "
+ + this.containerManagerBindAddress);
+ // Registration has to happen in start() so that the ContainerManager can
+ // get the per-NM tokens needed to authenticate ContainerTokens.
+ registerWithRM();
+ super.start();
+ startStatusUpdater();
+ } catch (Exception e) {
+ throw new AvroRuntimeException(e);
+ }
+ }
+
+ @Override
+ public synchronized void stop() {
+ // Interrupt the updater.
+ this.isStopped = true;
+ super.stop();
+ }
+
+ protected ResourceTracker getRMClient() {
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ InetSocketAddress rmAddress = NetUtils.createSocketAddr(this.rmAddress);
+ Configuration rmClientConf = new Configuration(getConfig());
+ rmClientConf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ RMNMSecurityInfoClass.class, SecurityInfo.class);
+ return (ResourceTracker) rpc.getProxy(ResourceTracker.class, rmAddress,
+ rmClientConf);
+ }
+
+ private void registerWithRM() throws YarnRemoteException {
+ this.resourceTracker = getRMClient();
+ LOG.info("Connected to ResourceManager at " + this.rmAddress);
+
+ RegisterNodeManagerRequest request = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
+ this.nodeId = Records.newRecord(NodeId.class);
+ this.nodeId.setHost(this.hostName);
+ this.nodeId.setPort(this.containerManagerPort);
+ request.setHttpPort(this.httpPort);
+ request.setResource(this.totalResource);
+ request.setNodeId(this.nodeId);
+ RegistrationResponse regResponse =
+ this.resourceTracker.registerNodeManager(request).getRegistrationResponse();
+ if (UserGroupInformation.isSecurityEnabled()) {
+ this.secretKeyBytes = regResponse.getSecretKey().array();
+ }
+
+ LOG.info("Registered with ResourceManager as " + this.containerManagerBindAddress
+ + " with total resource of " + this.totalResource);
+ }
+
+ @Override
+ public String getContainerManagerBindAddress() {
+ return this.containerManagerBindAddress;
+ }
+
+ @Override
+ public byte[] getRMNMSharedSecret() {
+ return this.secretKeyBytes.clone();
+ }
+
+ private NodeStatus getNodeStatus() {
+
+ NodeStatus nodeStatus = recordFactory.newRecordInstance(NodeStatus.class);
+ nodeStatus.setNodeId(this.nodeId);
+
+ int numActiveContainers = 0;
+ for (Iterator<Entry<ContainerId, Container>> i =
+ this.context.getContainers().entrySet().iterator(); i.hasNext();) {
+ Entry<ContainerId, Container> e = i.next();
+ ContainerId containerId = e.getKey();
+ Container container = e.getValue();
+
+ List<org.apache.hadoop.yarn.api.records.Container> applicationContainers = nodeStatus
+ .getContainers(container.getContainerID().getAppId());
+ if (applicationContainers == null) {
+ applicationContainers = new ArrayList<org.apache.hadoop.yarn.api.records.Container>();
+ nodeStatus.setContainers(container.getContainerID().getAppId(),
+ applicationContainers);
+ }
+
+ // Clone the container to send it to the RM
+ org.apache.hadoop.yarn.api.records.Container c = container.cloneAndGetContainer();
+ c.setNodeId(this.nodeId);
+ c.setNodeHttpAddress(this.nodeHttpAddress); // TODO: don't set this every time.
+ applicationContainers.add(c);
+ ++numActiveContainers;
+ LOG.info("Sending out status for container: " + c);
+
+ if (c.getState() == ContainerState.COMPLETE) {
+ // Remove
+ i.remove();
+
+ LOG.info("Removed completed container " + containerId);
+ }
+ }
+
+ LOG.debug(this.containerManagerBindAddress + " sending out status for " + numActiveContainers
+ + " containers");
+
+ NodeHealthStatus nodeHealthStatus = this.context.getNodeHealthStatus();
+ if (this.healthChecker != null) {
+ this.healthChecker.setHealthStatus(nodeHealthStatus);
+ }
+ LOG.debug("Node's health-status : " + nodeHealthStatus.getIsNodeHealthy()
+ + ", " + nodeHealthStatus.getHealthReport());
+ nodeStatus.setNodeHealthStatus(nodeHealthStatus);
+
+ return nodeStatus;
+ }
+
+ @Override
+ public void sendOutofBandHeartBeat() {
+ synchronized (this.heartbeatMonitor) {
+ this.heartbeatMonitor.notify();
+ }
+ }
+
+ protected void startStatusUpdater() {
+
+ new Thread() {
+ @Override
+ public void run() {
+ int lastHeartBeatID = 0;
+ while (!isStopped) {
+ // Send heartbeat
+ try {
+ synchronized (heartbeatMonitor) {
+ heartbeatMonitor.wait(heartBeatInterval);
+ }
+ NodeStatus nodeStatus = getNodeStatus();
+ nodeStatus.setResponseId(lastHeartBeatID);
+
+ NodeHeartbeatRequest request = recordFactory.newRecordInstance(NodeHeartbeatRequest.class);
+ request.setNodeStatus(nodeStatus);
+ HeartbeatResponse response =
+ resourceTracker.nodeHeartbeat(request).getHeartbeatResponse();
+ lastHeartBeatID = response.getResponseId();
+ List<ContainerId> containersToCleanup = response
+ .getContainersToCleanupList();
+ if (containersToCleanup.size() != 0) {
+ dispatcher.getEventHandler().handle(
+ new CMgrCompletedContainersEvent(containersToCleanup));
+ }
+ List<ApplicationId> appsToCleanup =
+ response.getApplicationsToCleanupList();
+ if (appsToCleanup.size() != 0) {
+ dispatcher.getEventHandler().handle(
+ new CMgrCompletedAppsEvent(appsToCleanup));
+ }
+ } catch (Throwable e) {
+ LOG.error("Caught exception in status-updater", e);
+ break;
+ }
+ }
+ }
+ }.start();
+ }
+}
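
A minimal, self-contained sketch of the wait/notify heartbeat pattern that
startStatusUpdater() and sendOutofBandHeartBeat() implement above. The class
and method names below are illustrative only, not part of this patch:

public class HeartbeatLoopSketch {

  private final Object heartbeatMonitor = new Object();
  private volatile boolean isStopped = false;
  private final long heartBeatInterval = 1000L; // ms between regular beats

  public void start() {
    new Thread() {
      @Override
      public void run() {
        while (!isStopped) {
          try {
            synchronized (heartbeatMonitor) {
              // Wakes after the interval elapses, or immediately when another
              // thread calls triggerOutOfBandHeartbeat().
              heartbeatMonitor.wait(heartBeatInterval);
            }
            System.out.println("sending heartbeat");
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
          }
        }
      }
    }.start();
  }

  // Forces an immediate heartbeat instead of waiting out the interval.
  public void triggerOutOfBandHeartbeat() {
    synchronized (heartbeatMonitor) {
      heartbeatMonitor.notify();
    }
  }

  public void stop() {
    isStopped = true;
    triggerOutOfBandHeartbeat(); // unblock the waiter so the loop can exit
  }
}

This is why sendOutofBandHeartBeat() above only needs to notify the monitor:
the updater thread re-checks state and sends a fresh NodeStatus as soon as it
wakes.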
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ResourceView.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ResourceView.java
new file mode 100644
index 0000000..ab47fdd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ResourceView.java
@@ -0,0 +1,26 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+public interface ResourceView {
+
+ long getVmemAllocatedForContainers();
+
+ long getPmemAllocatedForContainers();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocol.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocol.java
new file mode 100644
index 0000000..b501271
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocol.java
@@ -0,0 +1,27 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+
+public interface LocalizationProtocol {
+ public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status)
+ throws YarnRemoteException;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java
new file mode 100644
index 0000000..1cd981c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java
@@ -0,0 +1,67 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.client;
+
+import java.io.IOException;
+
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
+import org.apache.hadoop.yarn.proto.LocalizationProtocol.LocalizationProtocolService;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerHeartbeatResponsePBImpl;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerStatusPBImpl;
+import static org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
+
+import com.google.protobuf.ServiceException;
+
+public class LocalizationProtocolPBClientImpl implements LocalizationProtocol {
+
+ private LocalizationProtocolService.BlockingInterface proxy;
+
+ public LocalizationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
+ RPC.setProtocolEngine(conf, LocalizationProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
+ proxy = (LocalizationProtocolService.BlockingInterface)RPC.getProxy(
+ LocalizationProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ }
+
+ @Override
+ public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status)
+ throws YarnRemoteException {
+ LocalizerStatusProto statusProto = ((LocalizerStatusPBImpl)status).getProto();
+ try {
+ return new LocalizerHeartbeatResponsePBImpl(
+ proxy.heartbeat(null, statusProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+}
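
A hypothetical usage sketch for the PB client above, assuming a reachable
localization endpoint; the address, protocol version, and localizer id are
illustrative placeholders, not values defined by this patch:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
import org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.client.LocalizationProtocolPBClientImpl;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerStatusPBImpl;

public class LocalizerClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed endpoint: in practice the NodeManager hands the localizer its
    // heartbeat address; "localhost:4344" is only a placeholder.
    InetSocketAddress addr = new InetSocketAddress("localhost", 4344);
    LocalizationProtocol proxy =
        new LocalizationProtocolPBClientImpl(1L, addr, conf);

    LocalizerStatus status = new LocalizerStatusPBImpl();
    status.setLocalizerId("localizer_000001"); // illustrative id
    LocalizerHeartbeatResponse response = proxy.heartbeat(status);
    System.out.println("Requested action: " + response.getLocalizerAction());
  }
}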
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java
new file mode 100644
index 0000000..d69a4f9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java
@@ -0,0 +1,53 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.service;
+
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerHeartbeatResponsePBImpl;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerStatusPBImpl;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.proto.LocalizationProtocol.LocalizationProtocolService.BlockingInterface;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+
+public class LocalizationProtocolPBServiceImpl implements BlockingInterface {
+
+ private LocalizationProtocol real;
+
+ public LocalizationProtocolPBServiceImpl(LocalizationProtocol impl) {
+ this.real = impl;
+ }
+
+ @Override
+ public LocalizerHeartbeatResponseProto heartbeat(RpcController controller,
+ LocalizerStatusProto proto) throws ServiceException {
+ LocalizerStatusPBImpl request = new LocalizerStatusPBImpl(proto);
+ try {
+ LocalizerHeartbeatResponse response = real.heartbeat(request);
+ return ((LocalizerHeartbeatResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalResourceStatus.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalResourceStatus.java
new file mode 100644
index 0000000..91870b4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalResourceStatus.java
@@ -0,0 +1,36 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+
+public interface LocalResourceStatus {
+ public LocalResource getResource();
+ public ResourceStatusType getStatus();
+ public URL getLocalPath();
+ public long getLocalSize();
+ public YarnRemoteException getException();
+
+ public void setResource(LocalResource resource);
+ public void setStatus(ResourceStatusType status);
+ public void setLocalPath(URL localPath);
+ public void setLocalSize(long size);
+ public void setException(YarnRemoteException exception);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerAction.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerAction.java
new file mode 100644
index 0000000..c7ede44
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerAction.java
@@ -0,0 +1,22 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;
+
+public enum LocalizerAction {
+ LIVE, DIE
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerHeartbeatResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerHeartbeatResponse.java
new file mode 100644
index 0000000..b2f46c5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerHeartbeatResponse.java
@@ -0,0 +1,35 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.LocalResource;
+
+public interface LocalizerHeartbeatResponse {
+ public LocalizerAction getLocalizerAction();
+ public List<LocalResource> getAllResources();
+ public LocalResource getLocalResource(int i);
+
+ public void setLocalizerAction(LocalizerAction action);
+
+ public void addAllResources(List<LocalResource> resources);
+ public void addResource(LocalResource resource);
+ public void removeResource(int index);
+ public void clearResources();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerStatus.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerStatus.java
new file mode 100644
index 0000000..b10a872
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerStatus.java
@@ -0,0 +1,33 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;
+
+import java.util.List;
+
+public interface LocalizerStatus {
+
+ String getLocalizerId();
+ void setLocalizerId(String id);
+
+ List<LocalResourceStatus> getResources();
+ void addAllResources(List<LocalResourceStatus> resources);
+ void addResourceStatus(LocalResourceStatus resource);
+ LocalResourceStatus getResourceStatus(int index);
+ void removeResource(int index);
+ void clearResources();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/ResourceStatusType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/ResourceStatusType.java
new file mode 100644
index 0000000..ae9fc59
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/ResourceStatusType.java
@@ -0,0 +1,24 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;
+
+public enum ResourceStatusType {
+ FETCH_PENDING,
+ FETCH_SUCCESS,
+ FETCH_FAILURE,
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalResourceStatusPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalResourceStatusPBImpl.java
new file mode 100644
index 0000000..57f9608
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalResourceStatusPBImpl.java
@@ -0,0 +1,224 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.URLProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.YarnRemoteExceptionProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.ResourceStatusTypeProto;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType;
+
+public class LocalResourceStatusPBImpl
+ extends ProtoBase<LocalResourceStatusProto> implements LocalResourceStatus {
+
+ LocalResourceStatusProto proto =
+ LocalResourceStatusProto.getDefaultInstance();
+ LocalResourceStatusProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private LocalResource resource;
+ private URL localPath;
+ private YarnRemoteException exception;
+
+ public LocalResourceStatusPBImpl() {
+ builder = LocalResourceStatusProto.newBuilder();
+ }
+
+ public LocalResourceStatusPBImpl(LocalResourceStatusProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public LocalResourceStatusProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.resource != null &&
+ !((LocalResourcePBImpl)this.resource).getProto()
+ .equals(builder.getResource())) {
+ builder.setResource(convertToProtoFormat(this.resource));
+ }
+ if (this.localPath != null &&
+ !((URLPBImpl)this.localPath).getProto()
+ .equals(builder.getLocalPath())) {
+ builder.setLocalPath(convertToProtoFormat(this.localPath));
+ }
+ if (this.exception != null &&
+ !((YarnRemoteExceptionPBImpl)this.exception).getProto()
+ .equals(builder.getException())) {
+ builder.setException(convertToProtoFormat(this.exception));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = LocalResourceStatusProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public LocalResource getResource() {
+ LocalResourceStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.resource != null) {
+ return this.resource;
+ }
+ if (!p.hasResource()) {
+ return null;
+ }
+ this.resource = convertFromProtoFormat(p.getResource());
+ return this.resource;
+ }
+
+ @Override
+ public ResourceStatusType getStatus() {
+ LocalResourceStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasStatus()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getStatus());
+ }
+
+ @Override
+ public URL getLocalPath() {
+ LocalResourceStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.localPath != null) {
+ return this.localPath;
+ }
+ if (!p.hasLocalPath()) {
+ return null;
+ }
+ this.localPath = convertFromProtoFormat(p.getLocalPath());
+ return this.localPath;
+ }
+
+ @Override
+ public long getLocalSize() {
+ LocalResourceStatusProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getLocalSize());
+ }
+
+ @Override
+ public YarnRemoteException getException() {
+ LocalResourceStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.exception != null) {
+ return this.exception;
+ }
+ if (!p.hasException()) {
+ return null;
+ }
+ this.exception = convertFromProtoFormat(p.getException());
+ return this.exception;
+ }
+
+ @Override
+ public void setResource(LocalResource resource) {
+ maybeInitBuilder();
+ if (resource == null)
+ builder.clearResource();
+ this.resource = resource;
+ }
+
+ @Override
+ public void setStatus(ResourceStatusType status) {
+ maybeInitBuilder();
+ if (status == null) {
+ builder.clearStatus();
+ return;
+ }
+ builder.setStatus(convertToProtoFormat(status));
+ }
+
+ @Override
+ public void setLocalPath(URL localPath) {
+ maybeInitBuilder();
+ if (localPath == null)
+ builder.clearLocalPath();
+ this.localPath = localPath;
+ }
+
+ @Override
+ public void setLocalSize(long size) {
+ maybeInitBuilder();
+ builder.setLocalSize(size);
+ }
+
+ @Override
+ public void setException(YarnRemoteException exception) {
+ maybeInitBuilder();
+ if (exception == null)
+ builder.clearException();
+ this.exception = exception;
+ }
+
+ private LocalResourceProto convertToProtoFormat(LocalResource rsrc) {
+ return ((LocalResourcePBImpl)rsrc).getProto();
+ }
+
+ private LocalResourcePBImpl convertFromProtoFormat(LocalResourceProto rsrc) {
+ return new LocalResourcePBImpl(rsrc);
+ }
+
+ private URLPBImpl convertFromProtoFormat(URLProto p) {
+ return new URLPBImpl(p);
+ }
+
+ private URLProto convertToProtoFormat(URL t) {
+ return ((URLPBImpl)t).getProto();
+ }
+
+ private ResourceStatusTypeProto convertToProtoFormat(ResourceStatusType e) {
+ return ResourceStatusTypeProto.valueOf(e.name());
+ }
+
+ private ResourceStatusType convertFromProtoFormat(ResourceStatusTypeProto e) {
+ return ResourceStatusType.valueOf(e.name());
+ }
+
+ private YarnRemoteExceptionPBImpl convertFromProtoFormat(YarnRemoteExceptionProto p) {
+ return new YarnRemoteExceptionPBImpl(p);
+ }
+
+ private YarnRemoteExceptionProto convertToProtoFormat(YarnRemoteException t) {
+ return ((YarnRemoteExceptionPBImpl)t).getProto();
+ }
+
+}
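
The record above follows the viaProto/builder pattern used throughout these
PB record types. A short sketch of the intended round trip, with illustrative
values:

import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProto;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalResourceStatusPBImpl;

public class RecordRoundTripSketch {
  public static void main(String[] args) {
    // Sender side: set fields on the Java view of the record.
    LocalResourceStatus status = new LocalResourceStatusPBImpl();
    status.setStatus(ResourceStatusType.FETCH_SUCCESS);
    status.setLocalSize(4096L);

    // getProto() merges cached Java-side fields into the builder and flips
    // the record to proto-backed mode (viaProto = true).
    LocalResourceStatusProto wire =
        ((LocalResourceStatusPBImpl) status).getProto();

    // Receiver side: wrap the proto; fields are decoded lazily on access.
    LocalResourceStatus received = new LocalResourceStatusPBImpl(wire);
    System.out.println(received.getStatus() + ", " + received.getLocalSize());
  }
}

The same merge-on-getProto() discipline appears in
LocalizerHeartbeatResponsePBImpl and LocalizerStatusPBImpl below.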
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalizerHeartbeatResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalizerHeartbeatResponsePBImpl.java
new file mode 100644
index 0000000..0b791c0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalizerHeartbeatResponsePBImpl.java
@@ -0,0 +1,195 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerActionProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+
+public class LocalizerHeartbeatResponsePBImpl
+ extends ProtoBase<LocalizerHeartbeatResponseProto>
+ implements LocalizerHeartbeatResponse {
+
+ LocalizerHeartbeatResponseProto proto =
+ LocalizerHeartbeatResponseProto.getDefaultInstance();
+ LocalizerHeartbeatResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private List<LocalResource> resources;
+
+ public LocalizerHeartbeatResponsePBImpl() {
+ builder = LocalizerHeartbeatResponseProto.newBuilder();
+ }
+
+ public LocalizerHeartbeatResponsePBImpl(LocalizerHeartbeatResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public LocalizerHeartbeatResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (resources != null) {
+ addResourcesToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = LocalizerHeartbeatResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ public LocalizerAction getLocalizerAction() {
+ LocalizerHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasAction()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getAction());
+ }
+
+ public List<LocalResource> getAllResources() {
+ initResources();
+ return this.resources;
+ }
+
+ public LocalResource getLocalResource(int i) {
+ initResources();
+ return this.resources.get(i);
+ }
+
+ public void setLocalizerAction(LocalizerAction action) {
+ maybeInitBuilder();
+ if (action == null) {
+ builder.clearAction();
+ return;
+ }
+ builder.setAction(convertToProtoFormat(action));
+ }
+
+ private void initResources() {
+ if (this.resources != null) {
+ return;
+ }
+ LocalizerHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<LocalResourceProto> list = p.getResourcesList();
+ this.resources = new ArrayList<LocalResource>();
+
+ for (LocalResourceProto c : list) {
+ this.resources.add(convertFromProtoFormat(c));
+ }
+ }
+
+ private void addResourcesToProto() {
+ maybeInitBuilder();
+ builder.clearResources();
+ if (this.resources == null)
+ return;
+ Iterable<LocalResourceProto> iterable =
+ new Iterable<LocalResourceProto>() {
+ @Override
+ public Iterator<LocalResourceProto> iterator() {
+ return new Iterator<LocalResourceProto>() {
+
+ Iterator<LocalResource> iter = resources.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public LocalResourceProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ }
+ };
+ builder.addAllResources(iterable);
+ }
+
+ public void addAllResources(List<LocalResource> resources) {
+ if (resources == null)
+ return;
+ initResources();
+ this.resources.addAll(resources);
+ }
+
+ public void addResource(LocalResource resource) {
+ initResources();
+ this.resources.add(resource);
+ }
+
+ public void removeResource(int index) {
+ initResources();
+ this.resources.remove(index);
+ }
+
+ public void clearResources() {
+ initResources();
+ this.resources.clear();
+ }
+
+ private LocalResource convertFromProtoFormat(LocalResourceProto p) {
+ return new LocalResourcePBImpl(p);
+ }
+
+ private LocalResourceProto convertToProtoFormat(LocalResource s) {
+ return ((LocalResourcePBImpl)s).getProto();
+ }
+
+ private LocalizerActionProto convertToProtoFormat(LocalizerAction a) {
+ return LocalizerActionProto.valueOf(a.name());
+ }
+
+ private LocalizerAction convertFromProtoFormat(LocalizerActionProto a) {
+ return LocalizerAction.valueOf(a.name());
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalizerStatusPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalizerStatusPBImpl.java
new file mode 100644
index 0000000..06b0020
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalizerStatusPBImpl.java
@@ -0,0 +1,192 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProtoOrBuilder;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+
+public class LocalizerStatusPBImpl
+ extends ProtoBase<LocalizerStatusProto> implements LocalizerStatus {
+
+ LocalizerStatusProto proto =
+ LocalizerStatusProto.getDefaultInstance();
+ LocalizerStatusProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private List<LocalResourceStatus> resources = null;
+
+ public LocalizerStatusPBImpl() {
+ builder = LocalizerStatusProto.newBuilder();
+ }
+
+ public LocalizerStatusPBImpl(LocalizerStatusProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public LocalizerStatusProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.resources != null) {
+ addResourcesToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = LocalizerStatusProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public String getLocalizerId() {
+ LocalizerStatusProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasLocalizerId()) {
+ return null;
+ }
+ return (p.getLocalizerId());
+ }
+
+ @Override
+ public List<LocalResourceStatus> getResources() {
+ initResources();
+ return this.resources;
+ }
+
+ @Override
+ public void setLocalizerId(String localizerId) {
+ maybeInitBuilder();
+ if (localizerId == null) {
+ builder.clearLocalizerId();
+ return;
+ }
+ builder.setLocalizerId(localizerId);
+ }
+
+ private void initResources() {
+ if (this.resources != null) {
+ return;
+ }
+ LocalizerStatusProtoOrBuilder p = viaProto ? proto : builder;
+ List<LocalResourceStatusProto> list = p.getResourcesList();
+ this.resources = new ArrayList<LocalResourceStatus>();
+
+ for (LocalResourceStatusProto c : list) {
+ this.resources.add(convertFromProtoFormat(c));
+ }
+ }
+
+ private void addResourcesToProto() {
+ maybeInitBuilder();
+ builder.clearResources();
+ if (this.resources == null)
+ return;
+ Iterable<LocalResourceStatusProto> iterable =
+ new Iterable<LocalResourceStatusProto>() {
+ @Override
+ public Iterator<LocalResourceStatusProto> iterator() {
+ return new Iterator<LocalResourceStatusProto>() {
+
+ Iterator<LocalResourceStatus> iter = resources.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public LocalResourceStatusProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ }
+ };
+ builder.addAllResources(iterable);
+ }
+
+ @Override
+ public void addAllResources(List<LocalResourceStatus> resources) {
+ if (resources == null)
+ return;
+ initResources();
+ this.resources.addAll(resources);
+ }
+
+ @Override
+ public LocalResourceStatus getResourceStatus(int index) {
+ initResources();
+ return this.resources.get(index);
+ }
+
+ @Override
+ public void addResourceStatus(LocalResourceStatus resource) {
+ initResources();
+ this.resources.add(resource);
+ }
+
+ @Override
+ public void removeResource(int index) {
+ initResources();
+ this.resources.remove(index);
+ }
+
+ @Override
+ public void clearResources() {
+ initResources();
+ this.resources.clear();
+ }
+
+ private LocalResourceStatus
+ convertFromProtoFormat(LocalResourceStatusProto p) {
+ return new LocalResourceStatusPBImpl(p);
+ }
+
+ private LocalResourceStatusProto convertToProtoFormat(LocalResourceStatus s) {
+ return ((LocalResourceStatusPBImpl)s).getProto();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
new file mode 100644
index 0000000..4abf4a6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -0,0 +1,151 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.service.Service;
+import org.apache.hadoop.yarn.service.ServiceStateChangeListener;
+
+public class AuxServices extends AbstractService
+ implements ServiceStateChangeListener, EventHandler<AuxServicesEvent> {
+
+ private static final Log LOG = LogFactory.getLog(AuxServices.class);
+
+ public static final String AUX_SERVICES = "nodemanager.auxiliary.services";
+ public static final String AUX_SERVICE_CLASS_FMT =
+ "nodemanager.aux.service.%s.class";
+ public final Map<String,AuxiliaryService> serviceMap;
+
+ public AuxServices() {
+ super(AuxServices.class.getName());
+ serviceMap =
+ Collections.synchronizedMap(new HashMap<String,AuxiliaryService>());
+ // Obtain services from configuration in init()
+ }
+
+ protected final synchronized void addService(String name,
+ AuxiliaryService service) {
+ LOG.info("Adding auxiliary service " +
+ service.getName() + ", \"" + name + "\"");
+ serviceMap.put(name, service);
+ }
+
+ Collection<AuxiliaryService> getServices() {
+ return Collections.unmodifiableCollection(serviceMap.values());
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ Collection<String> auxNames = conf.getStringCollection(AUX_SERVICES);
+ for (final String sName : auxNames) {
+ try {
+ Class<? extends AuxiliaryService> sClass = conf.getClass(
+ String.format(AUX_SERVICE_CLASS_FMT, sName), null,
+ AuxiliaryService.class);
+ if (null == sClass) {
+ throw new RuntimeException("No class defined for " + sName);
+ }
+ AuxiliaryService s = ReflectionUtils.newInstance(sClass, conf);
+ // TODO: better to use s.getName()?
+ addService(sName, s);
+ s.init(conf);
+ } catch (RuntimeException e) {
+ LOG.fatal("Failed to initialize " + sName, e);
+ throw e;
+ }
+ }
+ super.init(conf);
+ }
+
+ @Override
+ public void start() {
+ // TODO fork(?) services running as configured user
+ // monitor for health, shutdown/restart(?) if any should die
+ for (Service service : serviceMap.values()) {
+ service.start();
+ service.register(this);
+ }
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ try {
+ synchronized (serviceMap) {
+ for (Service service : serviceMap.values()) {
+ if (service.getServiceState() == Service.STATE.STARTED) {
+ service.unregister(this);
+ service.stop();
+ }
+ }
+ serviceMap.clear();
+ }
+ } finally {
+ super.stop();
+ }
+ }
+
+ @Override
+ public void stateChanged(Service service) {
+ LOG.fatal("Service " + service.getName() + " changed state: " +
+ service.getServiceState());
+ stop();
+ }
+
+ @Override
+ public void handle(AuxServicesEvent event) {
+ LOG.info("Got event " + event.getType() + " for service "
+ + event.getServiceID());
+ AuxiliaryService service = serviceMap.get(event.getServiceID());
+ if (null == service) {
+ // TODO kill all containers waiting on Application
+ return;
+ }
+ switch (event.getType()) {
+ case APPLICATION_INIT:
+ service.initApp(event.getUser(), event.getApplicationID(),
+ event.getServiceData());
+ break;
+ case APPLICATION_STOP:
+ service.stopApp(event.getApplicationID());
+ break;
+ default:
+ throw new RuntimeException("Unknown type: " + event.getType());
+ }
+ }
+
+ public interface AuxiliaryService extends Service {
+ void initApp(String user, ApplicationId appId, ByteBuffer data);
+ void stopApp(ApplicationId appId);
+ }
+
+}
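
A sketch of wiring a hypothetical auxiliary service into AuxServices through
the configuration keys defined above; MyAuxService and the service name
"my.aux" are illustrative assumptions, not part of this patch:

import java.nio.ByteBuffer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices;
import org.apache.hadoop.yarn.service.AbstractService;

public class MyAuxService extends AbstractService
    implements AuxServices.AuxiliaryService {

  public MyAuxService() {
    super("MyAuxService");
  }

  @Override
  public void initApp(String user, ApplicationId appId, ByteBuffer data) {
    // per-application setup (e.g. recording the service data) would go here
  }

  @Override
  public void stopApp(ApplicationId appId) {
    // per-application teardown would go here
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setStrings(AuxServices.AUX_SERVICES, "my.aux");
    conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "my.aux"),
        MyAuxService.class, AuxServices.AuxiliaryService.class);

    AuxServices aux = new AuxServices();
    aux.init(conf);  // reflectively instantiates and init()s MyAuxService
    aux.start();     // starts each service and registers for state changes
  }
}

AuxServices.init() looks up each name listed under AUX_SERVICES, resolves its
class from the per-service key, and fails fast with a RuntimeException if no
class is configured.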
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEvent.java
new file mode 100644
index 0000000..8e897be
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEvent.java
@@ -0,0 +1,63 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.yarn.event.Event;
+
+public class AuxServicesEvent extends AbstractEvent<AuxServicesEventType> {
+
+ private final String user;
+ private final String serviceId;
+ private final ByteBuffer serviceData;
+ private final ApplicationId appId;
+
+ public AuxServicesEvent(AuxServicesEventType eventType, ApplicationId appId) {
+ this(eventType, null, appId, null, null);
+ }
+
+ public AuxServicesEvent(AuxServicesEventType eventType, String user,
+ ApplicationId appId, String serviceId, ByteBuffer serviceData) {
+ super(eventType);
+ this.user = user;
+ this.appId = appId;
+ this.serviceId = serviceId;
+ this.serviceData = serviceData;
+ }
+
+ public String getServiceID() {
+ return serviceId;
+ }
+
+ public ByteBuffer getServiceData() {
+ return serviceData;
+ }
+
+ public String getUser() {
+ return user;
+ }
+
+ public ApplicationId getApplicationID() {
+ return appId;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEventType.java
new file mode 100644
index 0000000..b8276b0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEventType.java
@@ -0,0 +1,24 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+public enum AuxServicesEventType {
+ APPLICATION_INIT,
+ APPLICATION_STOP
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerLocalization.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerLocalization.java
new file mode 100644
index 0000000..992ee6f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerLocalization.java
@@ -0,0 +1,21 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+public interface ContainerLocalization {
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerLocalizationImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerLocalizationImpl.java
new file mode 100644
index 0000000..ffeb1f8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerLocalizationImpl.java
@@ -0,0 +1,29 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+
+public class ContainerLocalizationImpl implements ContainerLocalization {
+
+ public ContainerLocalizationImpl(Dispatcher dispatcher, Application app,
+ LocalizationProtocol localization) {
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
new file mode 100644
index 0000000..c4bf2c2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -0,0 +1,396 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.DEFAULT_NM_BIND_ADDRESS;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_BIND_ADDRESS;
+import static org.apache.hadoop.yarn.service.Service.STATE.STARTED;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.Map;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.DataInputByteBuffer;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo;
+import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
+import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedContainersEvent;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerManagerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationInitEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncher;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogAggregationService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.apache.hadoop.yarn.service.CompositeService;
+import org.apache.hadoop.yarn.service.Service;
+import org.apache.hadoop.yarn.service.ServiceStateChangeListener;
+
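+/**
+ * Node-side container-management service: a CompositeService that bundles
+ * resource localization, the containers launcher, the containers monitor,
+ * aux services and log aggregation, and serves the ContainerManager RPC
+ * protocol to ApplicationMasters.
+ *
+ * A rough client-side sketch (proxy creation elided; recordFactory comes
+ * from RecordFactoryProvider, and the setter is assumed from the protocol
+ * records this class consumes):
+ *
+ *   StartContainerRequest req =
+ *       recordFactory.newRecordInstance(StartContainerRequest.class);
+ *   req.setContainerLaunchContext(launchContext);
+ *   containerManager.startContainer(req);
+ */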
+public class ContainerManagerImpl extends CompositeService implements
+ ServiceStateChangeListener, ContainerManager,
+ EventHandler<ContainerManagerEvent> {
+
+ private static final Log LOG = LogFactory.getLog(ContainerManagerImpl.class);
+
+ final Context context;
+ private final ContainersMonitor containersMonitor;
+ private Server server;
+ private InetSocketAddress cmBindAddress;
+ private final ResourceLocalizationService rsrcLocalizationSrvc;
+ private final ContainersLauncher containersLauncher;
+ private final AuxServices auxiliaryServices;
+ private final NodeManagerMetrics metrics;
+
+ private final NodeStatusUpdater nodeStatusUpdater;
+ private ContainerTokenSecretManager containerTokenSecretManager;
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ protected final AsyncDispatcher dispatcher;
+
+ private final DeletionService deletionService;
+
+ public ContainerManagerImpl(Context context, ContainerExecutor exec,
+ DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,
+ NodeManagerMetrics metrics) {
+ super(ContainerManagerImpl.class.getName());
+ this.context = context;
+ dispatcher = new AsyncDispatcher();
+ this.deletionService = deletionContext;
+ this.metrics = metrics;
+
+ rsrcLocalizationSrvc =
+ createResourceLocalizationService(exec, deletionContext);
+ addService(rsrcLocalizationSrvc);
+
+ containersLauncher = createContainersLauncher(context, exec);
+ addService(containersLauncher);
+
+ this.nodeStatusUpdater = nodeStatusUpdater;
+ // Create the secretManager if need be.
+ if (UserGroupInformation.isSecurityEnabled()) {
+ LOG.info("Security is enabled on NodeManager. "
+ + "Creating ContainerTokenSecretManager");
+ this.containerTokenSecretManager = new ContainerTokenSecretManager();
+ }
+
+ // Add the pluggable aux services
+ auxiliaryServices = new AuxServices();
+ auxiliaryServices.register(this);
+ addService(auxiliaryServices);
+
+ this.containersMonitor =
+ new ContainersMonitorImpl(exec, dispatcher, this.context);
+ addService(this.containersMonitor);
+
+ LogAggregationService logAggregationService =
+ createLogAggregationService(this.deletionService);
+ addService(logAggregationService);
+
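+ // Wire the async dispatcher: route each event type to the component that
+ // consumes it; container and application events fan out to per-instance
+ // state machines via the dispatchers below.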
+ dispatcher.register(ContainerEventType.class,
+ new ContainerEventDispatcher());
+ dispatcher.register(ApplicationEventType.class,
+ new ApplicationEventDispatcher());
+ dispatcher.register(LocalizationEventType.class, rsrcLocalizationSrvc);
+ dispatcher.register(AuxServicesEventType.class, auxiliaryServices);
+ dispatcher.register(ContainersMonitorEventType.class, containersMonitor);
+ dispatcher.register(ContainersLauncherEventType.class, containersLauncher);
+ dispatcher.register(LogAggregatorEventType.class, logAggregationService);
+ addService(dispatcher);
+ }
+
+ protected LogAggregationService createLogAggregationService(
+ DeletionService deletionService) {
+ return new LogAggregationService(deletionService);
+ }
+
+ public ContainersMonitor getContainersMonitor() {
+ return this.containersMonitor;
+ }
+
+ protected ResourceLocalizationService createResourceLocalizationService(
+ ContainerExecutor exec, DeletionService deletionContext) {
+ return new ResourceLocalizationService(this.dispatcher, exec,
+ deletionContext);
+ }
+
+ protected ContainersLauncher createContainersLauncher(Context context,
+ ContainerExecutor exec) {
+ return new ContainersLauncher(context, this.dispatcher, exec);
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ cmBindAddress = NetUtils.createSocketAddr(
+ conf.get(NM_BIND_ADDRESS, DEFAULT_NM_BIND_ADDRESS));
+ super.init(conf);
+ }
+
+ @Override
+ public void start() {
+
+ // TODO: Enqueue user dirs in the deletion context.
+
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ if (UserGroupInformation.isSecurityEnabled()) {
+ // This is safe: the status updater is started before the
+ // ContainerManager, and the RM hands out the shared secret during
+ // registration in NodeStatusUpdater#start() itself.
+ this.containerTokenSecretManager.setSecretKey(
+ this.nodeStatusUpdater.getContainerManagerBindAddress(),
+ this.nodeStatusUpdater.getRMNMSharedSecret());
+ }
+ Configuration cmConf = new Configuration(getConfig());
+ cmConf.setClass(YarnConfiguration.YARN_SECURITY_INFO,
+ ContainerManagerSecurityInfo.class, SecurityInfo.class);
+ server =
+ rpc.getServer(ContainerManager.class, this, cmBindAddress, cmConf,
+ this.containerTokenSecretManager,
+ cmConf.getInt(NMConfig.NM_CONTAINER_MGR_THREADS,
+ NMConfig.DEFAULT_NM_CONTAINER_MGR_THREADS));
+ server.start();
+ LOG.info("ContainerManager started at " + cmBindAddress);
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ if (auxiliaryServices.getServiceState() == STARTED) {
+ auxiliaryServices.unregister(this);
+ }
+ if (server != null) {
+ server.close();
+ }
+ super.stop();
+ }
+
+ /**
+ * Start a container on this NodeManager: parse any credentials shipped in
+ * the launch context, register the container (and, for the first container
+ * of an application, the application itself) with the Context, and kick
+ * off initialization through the dispatcher.
+ */
+ @Override
+ public StartContainerResponse startContainer(StartContainerRequest request)
+ throws YarnRemoteException {
+ ContainerLaunchContext launchContext = request.getContainerLaunchContext();
+
+ LOG.info(" container is " + request);
+
+ // //////////// Parse credentials
+ ByteBuffer tokens = launchContext.getContainerTokens();
+ Credentials credentials = new Credentials();
+ if (tokens != null) {
+ DataInputByteBuffer buf = new DataInputByteBuffer();
+ tokens.rewind();
+ buf.reset(tokens);
+ try {
+ credentials.readTokenStorageStream(buf);
+ if (LOG.isDebugEnabled()) {
+ for (Token<? extends TokenIdentifier> tk : credentials
+ .getAllTokens()) {
+ LOG.debug(tk.getService() + " = " + tk.toString());
+ }
+ }
+ } catch (IOException e) {
+ throw RPCUtil.getRemoteException(e);
+ }
+ }
+ // //////////// End of parsing credentials
+
+ Container container =
+ new ContainerImpl(this.dispatcher, launchContext, credentials, metrics);
+ ContainerId containerID = launchContext.getContainerId();
+ ApplicationId applicationID = containerID.getAppId();
+ if (context.getContainers().putIfAbsent(containerID, container) != null) {
+ throw RPCUtil.getRemoteException("Container " + containerID
+ + " already is running on this node!!");
+ }
+
+ // Create an application reference; putIfAbsent keeps the existing
+ // entry when this is not the application's first container.
+ Application application = new ApplicationImpl(dispatcher,
+ launchContext.getUser(), applicationID, credentials);
+ if (null ==
+ context.getApplications().putIfAbsent(applicationID, application)) {
+ LOG.info("Creating a new application reference for app "
+ + applicationID);
+ }
+
+ // TODO: Validate the request
+ dispatcher.getEventHandler().handle(new ApplicationInitEvent(container));
+ StartContainerResponse response =
+ recordFactory.newRecordInstance(StartContainerResponse.class);
+ metrics.launchedContainer();
+ metrics.allocateContainer(launchContext.getResource());
+ return response;
+ }
+
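+ /**
+ * Stop a container on this NodeManager. The kill is asynchronous: this
+ * call only enqueues a ContainerKillEvent and returns; completion is
+ * reported back through the node's status updates.
+ */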
+ @Override
+ public StopContainerResponse stopContainer(StopContainerRequest request)
+ throws YarnRemoteException {
+
+ StopContainerResponse response =
+ recordFactory.newRecordInstance(StopContainerResponse.class);
+
+ ContainerId containerID = request.getContainerId();
+ Container container = this.context.getContainers().get(containerID);
+ if (container == null) {
+ LOG.warn("Trying to stop unknown container " + containerID);
+ return response; // Return immediately.
+ }
+ dispatcher.getEventHandler().handle(
+ new ContainerKillEvent(containerID,
+ "Container killed by the ApplicationMaster."));
+
+ // TODO: Move this code to appropriate place once kill_container is
+ // implemented.
+ nodeStatusUpdater.sendOutofBandHeartBeat();
+
+ return response;
+ }
+
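+ /**
+ * Return a cloned status for a container known to this node, or fail with
+ * a remote exception if the container is not handled here.
+ */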
+ @Override
+ public GetContainerStatusResponse getContainerStatus(
+ GetContainerStatusRequest request) throws YarnRemoteException {
+ ContainerId containerID = request.getContainerId();
+ LOG.info("Getting container-status for " + containerID);
+ Container container = this.context.getContainers().get(containerID);
+ if (container != null) {
+ ContainerStatus containerStatus = container.cloneAndGetContainerStatus();
+ LOG.info("Returning " + containerStatus);
+ GetContainerStatusResponse response =
+ recordFactory.newRecordInstance(GetContainerStatusResponse.class);
+ response.setStatus(containerStatus);
+ return response;
+ } else {
+ throw RPCUtil.getRemoteException("Container " + containerID
+ + " is not handled by this NodeManager");
+ }
+ }
+
+ class ContainerEventDispatcher implements EventHandler<ContainerEvent> {
+ @Override
+ public void handle(ContainerEvent event) {
+ Map<ContainerId,Container> containers =
+ ContainerManagerImpl.this.context.getContainers();
+ Container c = containers.get(event.getContainerID());
+ if (c != null) {
+ c.handle(event);
+ } else {
+ LOG.warn("Event " + event + " sent to absent container " +
+ event.getContainerID());
+ }
+ }
+ }
+
+ class ApplicationEventDispatcher implements EventHandler<ApplicationEvent> {
+
+ @Override
+ public void handle(ApplicationEvent event) {
+ Application app =
+ ContainerManagerImpl.this.context.getApplications().get(
+ event.getApplicationID());
+ if (app != null) {
+ app.handle(event);
+ } else {
+ LOG.warn("Event " + event + " sent to absent application " +
+ event.getApplicationID());
+ }
+ }
+
+ }
+
+ @Override
+ public void handle(ContainerManagerEvent event) {
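+ // Cleanup orders arrive from the RM (via the node's status updates) as
+ // CMgrCompleted*Events; fan them out as per-app / per-container events.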
+ switch (event.getType()) {
+ case FINISH_APPS:
+ CMgrCompletedAppsEvent appsFinishedEvent =
+ (CMgrCompletedAppsEvent) event;
+ for (ApplicationId appID : appsFinishedEvent.getAppsToCleanup()) {
+ this.dispatcher.getEventHandler().handle(
+ new ApplicationEvent(appID,
+ ApplicationEventType.FINISH_APPLICATION));
+ }
+ break;
+ case FINISH_CONTAINERS:
+ CMgrCompletedContainersEvent containersFinishedEvent =
+ (CMgrCompletedContainersEvent) event;
+ for (ContainerId container : containersFinishedEvent
+ .getContainersToCleanup()) {
+ this.dispatcher.getEventHandler().handle(
+ new ContainerKillEvent(container,
+ "Container Killed by ResourceManager"));
+ }
+ break;
+ default:
+ LOG.warn("Invalid event " + event.getType() + ". Ignoring.");
+ }
+ }
+
+ @Override
+ public void stateChanged(Service service) {
+ // TODO: react to state changes of registered services; currently a no-op.
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/Application.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/Application.java
new file mode 100644
index 0000000..b1571e9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/Application.java
@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.application;
+
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+
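+/**
+ * Node-local view of an application: the user it runs as, the containers
+ * it currently owns on this node, and its lifecycle state. Events are
+ * processed by the ApplicationImpl state machine.
+ */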
+public interface Application extends EventHandler<ApplicationEvent> {
+
+ String getUser();
+
+ Map<ContainerId, Container> getContainers();
+
+ ApplicationId getAppId();
+
+ ApplicationState getApplicationState();
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java
new file mode 100644
index 0000000..2886621
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java
@@ -0,0 +1,36 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.application;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class ApplicationContainerFinishedEvent extends ApplicationEvent {
+ private final ContainerId containerID;
+
+ public ApplicationContainerFinishedEvent(ContainerId containerID) {
+ super(containerID.getAppId(),
+ ApplicationEventType.APPLICATION_CONTAINER_FINISHED);
+ this.containerID = containerID;
+ }
+
+ public ContainerId getContainerID() {
+ return this.containerID;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEvent.java
new file mode 100644
index 0000000..23a9fcb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEvent.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.application;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class ApplicationEvent extends AbstractEvent<ApplicationEventType> {
+
+ private final ApplicationId applicationID;
+
+ public ApplicationEvent(ApplicationId appID,
+ ApplicationEventType appEventType) {
+ super(appEventType);
+ this.applicationID = appID;
+ }
+
+ public ApplicationId getApplicationID() {
+ return this.applicationID;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEventType.java
new file mode 100644
index 0000000..aea9c89
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEventType.java
@@ -0,0 +1,36 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.application;
+
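+/**
+ * Event types consumed by the application state machine, grouped by the
+ * component that produces them.
+ */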
+public enum ApplicationEventType {
+
+ // Source: ContainerManager
+ INIT_APPLICATION,
+ FINISH_APPLICATION,
+
+ // Source: ResourceLocalizationService
+ APPLICATION_INITED,
+ APPLICATION_RESOURCES_CLEANEDUP,
+
+ // Source: Container
+ APPLICATION_CONTAINER_FINISHED,
+
+ // Source: LogAggregationService
+ APPLICATION_FINISHED,
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
new file mode 100644
index 0000000..f711695
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -0,0 +1,323 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.application;
+
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerInitEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.ContainerLogsRetentionPolicy;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorAppFinishedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorAppStartedEvent;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class ApplicationImpl implements Application {
+
+ final Dispatcher dispatcher;
+ final String user;
+ final ApplicationId appId;
+ final Credentials credentials;
+
+ private static final Log LOG = LogFactory.getLog(Application.class);
+
+ Map<ContainerId, Container> containers =
+ new HashMap<ContainerId, Container>();
+
+ public ApplicationImpl(Dispatcher dispatcher, String user,
+ ApplicationId appId, Credentials credentials) {
+ this.dispatcher = dispatcher;
+ this.user = user;
+ this.appId = appId;
+ this.credentials = credentials;
+ stateMachine = stateMachineFactory.make(this);
+ }
+
+ @Override
+ public String getUser() {
+ return user;
+ }
+
+ @Override
+ public ApplicationId getAppId() {
+ return appId;
+ }
+
+ @Override
+ public synchronized ApplicationState getApplicationState() {
+ // TODO: Synchronization should happen at the state-machine level;
+ // this method-level lock appears to exist mainly for tests.
+ return this.stateMachine.getCurrentState();
+ }
+
+ @Override
+ public Map<ContainerId, Container> getContainers() {
+ return this.containers;
+ }
+
+ private static final ContainerDoneTransition CONTAINER_DONE_TRANSITION =
+ new ContainerDoneTransition();
+
+ private static StateMachineFactory<ApplicationImpl, ApplicationState,
+ ApplicationEventType, ApplicationEvent> stateMachineFactory =
+ new StateMachineFactory<ApplicationImpl, ApplicationState,
+ ApplicationEventType, ApplicationEvent>(ApplicationState.NEW)
+
+ // Transitions from NEW state
+ .addTransition(ApplicationState.NEW, ApplicationState.INITING,
+ ApplicationEventType.INIT_APPLICATION, new AppInitTransition())
+
+ // Transitions from INITING state
+ .addTransition(ApplicationState.INITING, ApplicationState.INITING,
+ ApplicationEventType.INIT_APPLICATION,
+ new AppIsInitingTransition())
+ .addTransition(ApplicationState.INITING,
+ EnumSet.of(ApplicationState.FINISHING_CONTAINERS_WAIT,
+ ApplicationState.APPLICATION_RESOURCES_CLEANINGUP),
+ ApplicationEventType.FINISH_APPLICATION,
+ new AppFinishTriggeredTransition())
+ .addTransition(ApplicationState.INITING, ApplicationState.RUNNING,
+ ApplicationEventType.APPLICATION_INITED,
+ new AppInitDoneTransition())
+
+ // Transitions from RUNNING state
+ .addTransition(ApplicationState.RUNNING,
+ ApplicationState.RUNNING,
+ ApplicationEventType.INIT_APPLICATION,
+ new DuplicateAppInitTransition())
+ .addTransition(ApplicationState.RUNNING,
+ ApplicationState.RUNNING,
+ ApplicationEventType.APPLICATION_CONTAINER_FINISHED,
+ CONTAINER_DONE_TRANSITION)
+ .addTransition(
+ ApplicationState.RUNNING,
+ EnumSet.of(ApplicationState.FINISHING_CONTAINERS_WAIT,
+ ApplicationState.APPLICATION_RESOURCES_CLEANINGUP),
+ ApplicationEventType.FINISH_APPLICATION,
+ new AppFinishTriggeredTransition())
+
+ // Transitions from FINISHING_CONTAINERS_WAIT state.
+ .addTransition(
+ ApplicationState.FINISHING_CONTAINERS_WAIT,
+ EnumSet.of(ApplicationState.FINISHING_CONTAINERS_WAIT,
+ ApplicationState.APPLICATION_RESOURCES_CLEANINGUP),
+ ApplicationEventType.APPLICATION_CONTAINER_FINISHED,
+ new AppFinishTransition())
+
+ // Transitions from APPLICATION_RESOURCES_CLEANINGUP state
+ .addTransition(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,
+ ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,
+ ApplicationEventType.APPLICATION_CONTAINER_FINISHED)
+ .addTransition(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,
+ ApplicationState.FINISHED,
+ ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP,
+ new AppCompletelyDoneTransition())
+
+ // create the topology tables
+ .installTopology();
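+
+ // Typical lifecycle encoded above:
+ // NEW -> INITING -> RUNNING -> FINISHING_CONTAINERS_WAIT
+ // -> APPLICATION_RESOURCES_CLEANINGUP -> FINISHED
+ // (a FINISH_APPLICATION with no live containers skips the wait state)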
+
+ private final StateMachine<ApplicationState, ApplicationEventType,
+ ApplicationEvent> stateMachine;
+
+ /**
+ * Notify services of new application.
+ */
+ static class AppInitTransition implements
+ SingleArcTransition<ApplicationImpl, ApplicationEvent> {
+ @Override
+ public void transition(ApplicationImpl app, ApplicationEvent event) {
+ ApplicationInitEvent initEvent = (ApplicationInitEvent) event;
+ Container container = initEvent.getContainer();
+ app.containers.put(container.getContainerID(), container);
+ app.dispatcher.getEventHandler().handle(
+ new ApplicationLocalizationEvent(
+ LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
+ }
+ }
+
+ /**
+ * Absorb initialization events while the application initializes.
+ */
+ static class AppIsInitingTransition implements
+ SingleArcTransition<ApplicationImpl, ApplicationEvent> {
+ @Override
+ public void transition(ApplicationImpl app, ApplicationEvent event) {
+ ApplicationInitEvent initEvent = (ApplicationInitEvent) event;
+ Container container = initEvent.getContainer();
+ app.containers.put(container.getContainerID(), container);
+ LOG.info("Adding " + container.getContainerID()
+ + " to application " + app.toString());
+ }
+ }
+
+ static class AppInitDoneTransition implements
+ SingleArcTransition<ApplicationImpl, ApplicationEvent> {
+ @Override
+ public void transition(ApplicationImpl app, ApplicationEvent event) {
+
+ // Inform the logAggregator
+ app.dispatcher.getEventHandler().handle(
+ new LogAggregatorAppStartedEvent(app.appId, app.user,
+ app.credentials,
+ ContainerLogsRetentionPolicy.ALL_CONTAINERS)); // TODO: Fix
+
+ // Start all the containers waiting for ApplicationInit
+ for (Container container : app.containers.values()) {
+ app.dispatcher.getEventHandler().handle(new ContainerInitEvent(
+ container.getContainerID()));
+ }
+ }
+ }
+
+ static class DuplicateAppInitTransition implements
+ SingleArcTransition<ApplicationImpl, ApplicationEvent> {
+ @Override
+ public void transition(ApplicationImpl app, ApplicationEvent event) {
+ ApplicationInitEvent initEvent = (ApplicationInitEvent) event;
+ Container container = initEvent.getContainer();
+ app.containers.put(container.getContainerID(), container);
+ LOG.info("Adding " + container.getContainerID()
+ + " to application " + app.toString());
+ app.dispatcher.getEventHandler().handle(new ContainerInitEvent(
+ container.getContainerID()));
+ }
+ }
+
+ static final class ContainerDoneTransition implements
+ SingleArcTransition<ApplicationImpl, ApplicationEvent> {
+ @Override
+ public void transition(ApplicationImpl app, ApplicationEvent event) {
+ ApplicationContainerFinishedEvent containerEvent =
+ (ApplicationContainerFinishedEvent) event;
+ if (null == app.containers.remove(containerEvent.getContainerID())) {
+ LOG.warn("Removing unknown " + containerEvent.getContainerID() +
+ " from application " + app.toString());
+ } else {
+ LOG.info("Removing " + containerEvent.getContainerID() +
+ " from application " + app.toString());
+ }
+ }
+ }
+
+ void handleAppFinishWithContainersCleanedup() {
+ // Delete Application level resources
+ this.dispatcher.getEventHandler().handle(
+ new ApplicationLocalizationEvent(
+ LocalizationEventType.DESTROY_APPLICATION_RESOURCES, this));
+
+ // TODO: Trigger the LogsManager
+ }
+
+ static class AppFinishTriggeredTransition
+ implements
+ MultipleArcTransition<ApplicationImpl, ApplicationEvent, ApplicationState> {
+ @Override
+ public ApplicationState transition(ApplicationImpl app,
+ ApplicationEvent event) {
+
+ if (app.containers.isEmpty()) {
+ // No container to cleanup. Cleanup app level resources.
+ app.handleAppFinishWithContainersCleanedup();
+ return ApplicationState.APPLICATION_RESOURCES_CLEANINGUP;
+ }
+
+ // Kill every live container; the application waits in
+ // FINISHING_CONTAINERS_WAIT until all of them report finished.
+ for (ContainerId containerID : app.containers.keySet()) {
+ app.dispatcher.getEventHandler().handle(
+ new ContainerKillEvent(containerID,
+ "Container killed on application-finish event from RM."));
+ }
+ return ApplicationState.FINISHING_CONTAINERS_WAIT;
+ }
+ }
+
+ static class AppFinishTransition implements
+ MultipleArcTransition<ApplicationImpl, ApplicationEvent, ApplicationState> {
+
+ @Override
+ public ApplicationState transition(ApplicationImpl app,
+ ApplicationEvent event) {
+
+ ApplicationContainerFinishedEvent containerFinishEvent =
+ (ApplicationContainerFinishedEvent) event;
+ LOG.info("Removing " + containerFinishEvent.getContainerID()
+ + " from application " + app.toString());
+ app.containers.remove(containerFinishEvent.getContainerID());
+
+ if (app.containers.isEmpty()) {
+ // All containers have been cleaned up.
+ app.handleAppFinishWithContainersCleanedup();
+ return ApplicationState.APPLICATION_RESOURCES_CLEANINGUP;
+ }
+
+ return ApplicationState.FINISHING_CONTAINERS_WAIT;
+ }
+
+ }
+
+ static class AppCompletelyDoneTransition implements
+ SingleArcTransition<ApplicationImpl, ApplicationEvent> {
+ @Override
+ public void transition(ApplicationImpl app, ApplicationEvent event) {
+ // Inform the logService
+ app.dispatcher.getEventHandler().handle(
+ new LogAggregatorAppFinishedEvent(app.appId));
+ }
+ }
+
+ @Override
+ public synchronized void handle(ApplicationEvent event) {
+
+ ApplicationId applicationID = event.getApplicationID();
+ LOG.info("Processing " + applicationID + " of type " + event.getType());
+
+ ApplicationState oldState = stateMachine.getCurrentState();
+ ApplicationState newState = null;
+ try {
+ newState = stateMachine.doTransition(event.getType(), event);
+ } catch (InvalidStateTransitonException e) {
+ LOG.warn("Can't handle event " + event.getType()
+ + " at current state " + oldState, e);
+ }
+ if (oldState != newState) {
+ LOG.info("Application " + applicationID + " transitioned from "
+ + oldState + " to " + newState);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return ConverterUtils.toString(appId);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitEvent.java
new file mode 100644
index 0000000..15c048a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitEvent.java
@@ -0,0 +1,37 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.application;
+
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+
+public class ApplicationInitEvent extends ApplicationEvent {
+
+ private final Container container;
+
+ public ApplicationInitEvent(Container container) {
+ super(container.getContainerID().getAppId(),
+ ApplicationEventType.INIT_APPLICATION);
+ this.container = container;
+ }
+
+ public Container getContainer() {
+ return this.container;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitedEvent.java
new file mode 100644
index 0000000..75d4eca
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitedEvent.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.application;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public class ApplicationInitedEvent extends ApplicationEvent {
+
+ public ApplicationInitedEvent(ApplicationId appID) {
+ super(appID, ApplicationEventType.APPLICATION_INITED);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationState.java
new file mode 100644
index 0000000..db8e10aa
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationState.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.application;
+
+public enum ApplicationState {
+ NEW, INITING, RUNNING, FINISHING_CONTAINERS_WAIT,
+ APPLICATION_RESOURCES_CLEANINGUP, FINISHED
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
new file mode 100644
index 0000000..6bd29e8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
@@ -0,0 +1,48 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import java.util.Map;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.event.EventHandler;
+
+public interface Container extends EventHandler<ContainerEvent> {
+
+ ContainerId getContainerID();
+
+ String getUser();
+
+ ContainerState getContainerState();
+
+ ContainerLaunchContext getLaunchContext();
+
+ Credentials getCredentials();
+
+ Map<Path,String> getLocalizedResources();
+
+ org.apache.hadoop.yarn.api.records.Container cloneAndGetContainer();
+
+ ContainerStatus cloneAndGetContainerStatus();
+
+ String toString();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerDiagnosticsUpdateEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerDiagnosticsUpdateEvent.java
new file mode 100644
index 0000000..dbb6e62
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerDiagnosticsUpdateEvent.java
@@ -0,0 +1,17 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class ContainerDiagnosticsUpdateEvent extends ContainerEvent {
+
+ private final String diagnosticsUpdate;
+
+ public ContainerDiagnosticsUpdateEvent(ContainerId cID, String update) {
+ super(cID, ContainerEventType.UPDATE_DIAGNOSTICS_MSG);
+ this.diagnosticsUpdate = update;
+ }
+
+ public String getDiagnosticsUpdate() {
+ return this.diagnosticsUpdate;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEvent.java
new file mode 100644
index 0000000..46ec270
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEvent.java
@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
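+/**
+ * Base class for events handled by a container's state machine; carries
+ * the id of the target container.
+ */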
+public class ContainerEvent extends AbstractEvent<ContainerEventType> {
+
+ private final ContainerId containerID;
+
+ public ContainerEvent(ContainerId cID, ContainerEventType eventType) {
+ super(eventType);
+ this.containerID = cID;
+ }
+
+ public ContainerId getContainerID() {
+ return containerID;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java
new file mode 100644
index 0000000..5622f8c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java
@@ -0,0 +1,40 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+public enum ContainerEventType {
+
+ // Producer: ContainerManager
+ INIT_CONTAINER,
+ KILL_CONTAINER,
+ UPDATE_DIAGNOSTICS_MSG,
+ CONTAINER_DONE,
+
+ // Producer: ResourceLocalizationService
+ CONTAINER_INITED,
+ RESOURCE_LOCALIZED,
+ RESOURCE_FAILED,
+ CONTAINER_RESOURCES_CLEANEDUP,
+
+ // Producer: ContainersLauncher
+ CONTAINER_LAUNCHED,
+ CONTAINER_EXITED_WITH_SUCCESS,
+ CONTAINER_EXITED_WITH_FAILURE,
+ CONTAINER_KILLED_ON_REQUEST,
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerExitEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerExitEvent.java
new file mode 100644
index 0000000..b941688
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerExitEvent.java
@@ -0,0 +1,35 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class ContainerExitEvent extends ContainerEvent {
+ private final int exitCode;
+
+ public ContainerExitEvent(ContainerId cID, ContainerEventType eventType,
+ int exitCode) {
+ super(cID, eventType);
+ this.exitCode = exitCode;
+ }
+
+ public int getExitCode() {
+ return this.exitCode;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
new file mode 100644
index 0000000..660311c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -0,0 +1,710 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationContainerFinishedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorContainerFinishedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStartMonitoringEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStopMonitoringEvent;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class ContainerImpl implements Container {
+
+ private final Lock readLock;
+ private final Lock writeLock;
+ private final Dispatcher dispatcher;
+ private final Credentials credentials;
+ private final NodeManagerMetrics metrics;
+ private final ContainerLaunchContext launchContext;
+ private String exitCode = "NA";
+ private final StringBuilder diagnostics;
+
+ private static final Log LOG = LogFactory.getLog(Container.class);
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+ private final Map<LocalResourceRequest,String> pendingResources =
+ new HashMap<LocalResourceRequest,String>();
+ private final Map<Path,String> localizedResources =
+ new HashMap<Path,String>();
+
+ public ContainerImpl(Dispatcher dispatcher,
+ ContainerLaunchContext launchContext, Credentials creds,
+ NodeManagerMetrics metrics) {
+ this.dispatcher = dispatcher;
+ this.launchContext = launchContext;
+ this.diagnostics = new StringBuilder();
+ this.credentials = creds;
+ this.metrics = metrics;
+
+ ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+ this.readLock = readWriteLock.readLock();
+ this.writeLock = readWriteLock.writeLock();
+
+ stateMachine = stateMachineFactory.make(this);
+ }
+
+ private static final ContainerDoneTransition CONTAINER_DONE_TRANSITION =
+ new ContainerDoneTransition();
+
+ private static final ContainerDiagnosticsUpdateTransition UPDATE_DIAGNOSTICS_TRANSITION =
+ new ContainerDiagnosticsUpdateTransition();
+
+ // State Machine for each container.
+ private static StateMachineFactory
+ <ContainerImpl, ContainerState, ContainerEventType, ContainerEvent>
+ stateMachineFactory =
+ new StateMachineFactory<ContainerImpl, ContainerState, ContainerEventType, ContainerEvent>(ContainerState.NEW)
+ // From NEW State
+ .addTransition(ContainerState.NEW,
+ EnumSet.of(ContainerState.LOCALIZING, ContainerState.LOCALIZED,
+ ContainerState.LOCALIZATION_FAILED),
+ ContainerEventType.INIT_CONTAINER, new RequestResourcesTransition())
+ .addTransition(ContainerState.NEW, ContainerState.NEW,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+ .addTransition(ContainerState.NEW, ContainerState.DONE,
+ ContainerEventType.KILL_CONTAINER, CONTAINER_DONE_TRANSITION)
+
+ // From LOCALIZING State
+ .addTransition(ContainerState.LOCALIZING,
+ EnumSet.of(ContainerState.LOCALIZING, ContainerState.LOCALIZED),
+ ContainerEventType.RESOURCE_LOCALIZED, new LocalizedTransition())
+ .addTransition(ContainerState.LOCALIZING,
+ ContainerState.LOCALIZATION_FAILED,
+ ContainerEventType.RESOURCE_FAILED,
+ new ResourceFailedTransition())
+ .addTransition(ContainerState.LOCALIZING, ContainerState.LOCALIZING,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+ .addTransition(ContainerState.LOCALIZING, ContainerState.KILLING,
+ ContainerEventType.KILL_CONTAINER,
+ new KillDuringLocalizationTransition())
+
+ // From LOCALIZATION_FAILED State
+ .addTransition(ContainerState.LOCALIZATION_FAILED,
+ ContainerState.DONE,
+ ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
+ CONTAINER_DONE_TRANSITION)
+ .addTransition(ContainerState.LOCALIZATION_FAILED,
+ ContainerState.LOCALIZATION_FAILED,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+
+ // From LOCALIZED State
+ .addTransition(ContainerState.LOCALIZED, ContainerState.RUNNING,
+ ContainerEventType.CONTAINER_LAUNCHED, new LaunchTransition())
+ .addTransition(ContainerState.LOCALIZED, ContainerState.EXITED_WITH_FAILURE,
+ ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
+ new ExitedWithFailureTransition())
+ .addTransition(ContainerState.LOCALIZED, ContainerState.LOCALIZED,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+ .addTransition(ContainerState.LOCALIZED, ContainerState.KILLING,
+ ContainerEventType.KILL_CONTAINER, new KillTransition())
+
+ // From RUNNING State
+ .addTransition(ContainerState.RUNNING,
+ ContainerState.EXITED_WITH_SUCCESS,
+ ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
+ new ExitedWithSuccessTransition())
+ .addTransition(ContainerState.RUNNING,
+ ContainerState.EXITED_WITH_FAILURE,
+ ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
+ new ExitedWithFailureTransition())
+ .addTransition(ContainerState.RUNNING, ContainerState.RUNNING,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+ .addTransition(ContainerState.RUNNING, ContainerState.KILLING,
+ ContainerEventType.KILL_CONTAINER, new KillTransition())
+
+ // From CONTAINER_EXITED_WITH_SUCCESS State
+ .addTransition(ContainerState.EXITED_WITH_SUCCESS, ContainerState.DONE,
+ ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
+ CONTAINER_DONE_TRANSITION)
+ .addTransition(ContainerState.EXITED_WITH_SUCCESS,
+ ContainerState.EXITED_WITH_SUCCESS,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+ .addTransition(ContainerState.EXITED_WITH_SUCCESS,
+ ContainerState.EXITED_WITH_SUCCESS,
+ ContainerEventType.KILL_CONTAINER)
+
+ // From EXITED_WITH_FAILURE State
+ .addTransition(ContainerState.EXITED_WITH_FAILURE, ContainerState.DONE,
+ ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
+ CONTAINER_DONE_TRANSITION)
+ .addTransition(ContainerState.EXITED_WITH_FAILURE,
+ ContainerState.EXITED_WITH_FAILURE,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+ .addTransition(ContainerState.EXITED_WITH_FAILURE,
+ ContainerState.EXITED_WITH_FAILURE,
+ ContainerEventType.KILL_CONTAINER)
+
+ // From KILLING State.
+ .addTransition(ContainerState.KILLING,
+ ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
+ ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
+ new ContainerKilledTransition())
+ .addTransition(ContainerState.KILLING,
+ ContainerState.KILLING,
+ ContainerEventType.RESOURCE_LOCALIZED,
+ new LocalizedResourceDuringKillTransition())
+ .addTransition(ContainerState.KILLING, ContainerState.KILLING,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+ .addTransition(ContainerState.KILLING, ContainerState.KILLING,
+ ContainerEventType.KILL_CONTAINER)
+ .addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_SUCCESS,
+ ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
+ new ExitedWithSuccessTransition())
+ .addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_FAILURE,
+ ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
+ new ExitedWithFailureTransition())
+ .addTransition(ContainerState.KILLING,
+ ContainerState.DONE,
+ ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
+ CONTAINER_DONE_TRANSITION)
+
+ // From CONTAINER_CLEANEDUP_AFTER_KILL State.
+ .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
+ ContainerState.DONE,
+ ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
+ CONTAINER_DONE_TRANSITION)
+ .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
+ ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+ .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
+ ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
+ ContainerEventType.KILL_CONTAINER)
+
+ // From DONE
+ .addTransition(ContainerState.DONE, ContainerState.DONE,
+ ContainerEventType.KILL_CONTAINER)
+ .addTransition(ContainerState.DONE, ContainerState.DONE,
+ ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
+ UPDATE_DIAGNOSTICS_TRANSITION)
+
+ // create the topology tables
+ .installTopology();
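+ // For reference, the nominal success path in the table above is
+ //   NEW -> LOCALIZING -> LOCALIZED -> RUNNING -> EXITED_WITH_SUCCESS -> DONE;
+ // kills divert through KILLING and CONTAINER_CLEANEDUP_AFTER_KILL, and
+ // localization errors through LOCALIZATION_FAILED, before reaching DONE.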
+
+ private final StateMachine<ContainerState, ContainerEventType, ContainerEvent>
+ stateMachine;
+
+ private org.apache.hadoop.yarn.api.records.ContainerState getCurrentState() {
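+ // Collapse the fine-grained internal states into the two states exposed
+ // through the public API: anything short of DONE reports as RUNNING.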
+ switch (stateMachine.getCurrentState()) {
+ case NEW:
+ case LOCALIZING:
+ case LOCALIZATION_FAILED:
+ case LOCALIZED:
+ case RUNNING:
+ case EXITED_WITH_SUCCESS:
+ case EXITED_WITH_FAILURE:
+ case KILLING:
+ case CONTAINER_CLEANEDUP_AFTER_KILL:
+ case CONTAINER_RESOURCES_CLEANINGUP:
+ return org.apache.hadoop.yarn.api.records.ContainerState.RUNNING;
+ case DONE:
+ default:
+ return org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+ }
+ }
+
+ @Override
+ public ContainerId getContainerID() {
+ this.readLock.lock();
+ try {
+ return this.launchContext.getContainerId();
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getUser() {
+ this.readLock.lock();
+ try {
+ return this.launchContext.getUser();
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public Map<Path,String> getLocalizedResources() {
+ this.readLock.lock();
+ try {
+ assert ContainerState.LOCALIZED == getContainerState(); // TODO: FIXME!!
+ return localizedResources;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public Credentials getCredentials() {
+ this.readLock.lock();
+ try {
+ return credentials;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public ContainerState getContainerState() {
+ this.readLock.lock();
+ try {
+ return stateMachine.getCurrentState();
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public
+ org.apache.hadoop.yarn.api.records.Container cloneAndGetContainer() {
+ this.readLock.lock();
+ try {
+ org.apache.hadoop.yarn.api.records.Container c =
+ recordFactory.newRecordInstance(
+ org.apache.hadoop.yarn.api.records.Container.class);
+ c.setId(this.launchContext.getContainerId());
+ c.setResource(this.launchContext.getResource());
+ c.setState(getCurrentState());
+ c.setContainerStatus(cloneAndGetContainerStatus());
+ return c;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public ContainerLaunchContext getLaunchContext() {
+ this.readLock.lock();
+ try {
+ return launchContext;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public ContainerStatus cloneAndGetContainerStatus() {
+ this.readLock.lock();
+ try {
+ ContainerStatus containerStatus =
+ recordFactory.newRecordInstance(ContainerStatus.class);
+ containerStatus.setState(getCurrentState());
+ containerStatus.setContainerId(this.launchContext.getContainerId());
+ containerStatus.setDiagnostics(diagnostics.toString());
+ containerStatus.setExitStatus(String.valueOf(exitCode));
+ return containerStatus;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @SuppressWarnings("fallthrough")
+ private void finished() {
+ switch (getContainerState()) {
+ case EXITED_WITH_SUCCESS:
+ metrics.endRunningContainer();
+ metrics.completedContainer();
+ break;
+ case EXITED_WITH_FAILURE:
+ metrics.endRunningContainer();
+ // fall through
+ case LOCALIZATION_FAILED:
+ metrics.failedContainer();
+ break;
+ case CONTAINER_CLEANEDUP_AFTER_KILL:
+ metrics.endRunningContainer();
+ // fall through
+ case NEW:
+ metrics.killedContainer();
+ }
+
+ metrics.releaseContainer(getLaunchContext().getResource());
+
+ // Inform the application
+ ContainerId containerID = getContainerID();
+ EventHandler eventHandler = dispatcher.getEventHandler();
+ eventHandler.handle(new ApplicationContainerFinishedEvent(containerID));
+ // Remove the container from the resource-monitor
+ eventHandler.handle(new ContainerStopMonitoringEvent(containerID));
+ // Tell the logService too
+ eventHandler.handle(new LogAggregatorContainerFinishedEvent(
+ containerID, exitCode));
+ }
+
+ static class ContainerTransition implements
+ SingleArcTransition<ContainerImpl, ContainerEvent> {
+
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ // Just drain the event and change the state.
+ }
+
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class RequestResourcesTransition implements
+ MultipleArcTransition<ContainerImpl,ContainerEvent,ContainerState> {
+ @Override
+ public ContainerState transition(ContainerImpl container,
+ ContainerEvent event) {
+ final ContainerLaunchContext ctxt = container.getLaunchContext();
+ container.metrics.initingContainer();
+
+ // Inform the AuxServices about the opaque serviceData
+ Map<String,ByteBuffer> csd = ctxt.getAllServiceData();
+ if (csd != null) {
+ // This can happen more than once per Application as each container may
+ // have distinct service data
+ for (Map.Entry<String,ByteBuffer> service : csd.entrySet()) {
+ container.dispatcher.getEventHandler().handle(
+ new AuxServicesEvent(AuxServicesEventType.APPLICATION_INIT,
+ ctxt.getUser(), ctxt.getContainerId().getAppId(),
+ service.getKey().toString(), service.getValue()));
+ }
+ }
+
+ // Send requests for public, private and application-scoped resources
+ Map<String,LocalResource> cntrRsrc = ctxt.getAllLocalResources();
+ if (!cntrRsrc.isEmpty()) {
+ ArrayList<LocalResourceRequest> publicRsrc =
+ new ArrayList<LocalResourceRequest>();
+ ArrayList<LocalResourceRequest> privateRsrc =
+ new ArrayList<LocalResourceRequest>();
+ ArrayList<LocalResourceRequest> appRsrc =
+ new ArrayList<LocalResourceRequest>();
+ try {
+ for (Map.Entry<String,LocalResource> rsrc : cntrRsrc.entrySet()) {
+ try {
+ LocalResourceRequest req =
+ new LocalResourceRequest(rsrc.getValue());
+ container.pendingResources.put(req, rsrc.getKey());
+ switch (rsrc.getValue().getVisibility()) {
+ case PUBLIC:
+ publicRsrc.add(req);
+ break;
+ case PRIVATE:
+ privateRsrc.add(req);
+ break;
+ case APPLICATION:
+ appRsrc.add(req);
+ break;
+ }
+ } catch (URISyntaxException e) {
+ LOG.info("Got exception parsing " + rsrc.getKey()
+ + " and value " + rsrc.getValue());
+ throw e;
+ }
+ }
+ } catch (URISyntaxException e) {
+ // malformed resource; abort container launch
+ LOG.warn("Failed to parse resource-request", e);
+ container.dispatcher.getEventHandler().handle(
+ new ContainerLocalizationEvent(
+ LocalizationEventType.CLEANUP_CONTAINER_RESOURCES, container));
+ container.metrics.endInitingContainer();
+ return ContainerState.LOCALIZATION_FAILED;
+ }
+ if (!publicRsrc.isEmpty()) {
+ container.dispatcher.getEventHandler().handle(
+ new ContainerLocalizationRequestEvent(
+ container, publicRsrc, LocalResourceVisibility.PUBLIC));
+ }
+ if (!privateRsrc.isEmpty()) {
+ container.dispatcher.getEventHandler().handle(
+ new ContainerLocalizationRequestEvent(
+ container, privateRsrc, LocalResourceVisibility.PRIVATE));
+ }
+ if (!appRsrc.isEmpty()) {
+ container.dispatcher.getEventHandler().handle(
+ new ContainerLocalizationRequestEvent(
+ container, appRsrc, LocalResourceVisibility.APPLICATION));
+ }
+ return ContainerState.LOCALIZING;
+ } else {
+ container.dispatcher.getEventHandler().handle(
+ new ContainersLauncherEvent(container,
+ ContainersLauncherEventType.LAUNCH_CONTAINER));
+ container.metrics.endInitingContainer();
+ return ContainerState.LOCALIZED;
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class LocalizedTransition implements
+ MultipleArcTransition<ContainerImpl,ContainerEvent,ContainerState> {
+ @Override
+ public ContainerState transition(ContainerImpl container,
+ ContainerEvent event) {
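+ // Record the localized path under its requested link name; once the last
+ // pending resource arrives, ask the ContainersLauncher to launch.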
+ ContainerResourceLocalizedEvent rsrcEvent = (ContainerResourceLocalizedEvent) event;
+ String sym = container.pendingResources.remove(rsrcEvent.getResource());
+ if (null == sym) {
+ LOG.warn("Localized unknown resource " + rsrcEvent.getResource() +
+ " for container " + container.getContainerID());
+ assert false;
+ // fail container?
+ return ContainerState.LOCALIZING;
+ }
+ container.localizedResources.put(rsrcEvent.getLocation(), sym);
+ if (!container.pendingResources.isEmpty()) {
+ return ContainerState.LOCALIZING;
+ }
+ container.dispatcher.getEventHandler().handle(
+ new ContainersLauncherEvent(container,
+ ContainersLauncherEventType.LAUNCH_CONTAINER));
+ container.metrics.endInitingContainer();
+ return ContainerState.LOCALIZED;
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class LaunchTransition extends ContainerTransition {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ // Inform the ContainersMonitor to start monitoring the container's
+ // resource usage.
+ // TODO: Fix pmem limits below
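+ // Resource.getMemory() is expressed in MB; the ContainersMonitor expects
+ // bytes.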
+ long vmemBytes =
+ container.getLaunchContext().getResource().getMemory() * 1024 * 1024L;
+ container.dispatcher.getEventHandler().handle(
+ new ContainerStartMonitoringEvent(container.getContainerID(),
+ vmemBytes, -1));
+ container.metrics.runningContainer();
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class ExitedWithSuccessTransition extends ContainerTransition {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ // TODO: Add containerWorkDir to the deletion service.
+
+ // Inform the localizer to decrement reference counts and cleanup
+ // resources.
+ container.dispatcher.getEventHandler().handle(
+ new ContainerLocalizationEvent(
+ LocalizationEventType.CLEANUP_CONTAINER_RESOURCES, container));
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class ExitedWithFailureTransition extends ContainerTransition {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ ContainerExitEvent exitEvent = (ContainerExitEvent) event;
+ container.exitCode = String.valueOf(exitEvent.getExitCode());
+
+ // TODO: Add containerWorkDir to the deletion service.
+ // TODO: Add containerOuputDir to the deletion service.
+
+ // Inform the localizer to decrement reference counts and cleanup
+ // resources.
+ container.dispatcher.getEventHandler().handle(
+ new ContainerLocalizationEvent(
+ LocalizationEventType.CLEANUP_CONTAINER_RESOURCES, container));
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class ResourceFailedTransition implements
+ SingleArcTransition<ContainerImpl, ContainerEvent> {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+
+ ContainerResourceFailedEvent rsrcFailedEvent =
+ (ContainerResourceFailedEvent) event;
+ container.diagnostics.append(
+ StringUtils.stringifyException(rsrcFailedEvent.getCause())).append(
+ "\n");
+
+ // Inform the localizer to decrement reference counts and cleanup
+ // resources.
+ container.dispatcher.getEventHandler().handle(
+ new ContainerLocalizationEvent(
+ LocalizationEventType.CLEANUP_CONTAINER_RESOURCES, container));
+ container.metrics.endInitingContainer();
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class KillDuringLocalizationTransition implements
+ SingleArcTransition<ContainerImpl, ContainerEvent> {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ // Inform the localizer to decrement reference counts and cleanup
+ // resources.
+ container.dispatcher.getEventHandler().handle(
+ new ContainerLocalizationEvent(
+ LocalizationEventType.CLEANUP_CONTAINER_RESOURCES, container));
+ container.metrics.endInitingContainer();
+ ContainerKillEvent killEvent = (ContainerKillEvent) event;
+ container.diagnostics.append(killEvent.getDiagnostic()).append("\n");
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class LocalizedResourceDuringKillTransition implements
+ SingleArcTransition<ContainerImpl, ContainerEvent> {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ ContainerResourceLocalizedEvent rsrcEvent = (ContainerResourceLocalizedEvent) event;
+ String sym = container.pendingResources.remove(rsrcEvent.getResource());
+ if (null == sym) {
+ LOG.warn("Localized unknown resource " + rsrcEvent.getResource() +
+ " for container " + container.getContainerID());
+ assert false;
+ // fail container?
+ return;
+ }
+ container.localizedResources.put(rsrcEvent.getLocation(), sym);
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class KillTransition implements
+ SingleArcTransition<ContainerImpl, ContainerEvent> {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ // Kill the process/process-grp
+ container.dispatcher.getEventHandler().handle(
+ new ContainersLauncherEvent(container,
+ ContainersLauncherEventType.CLEANUP_CONTAINER));
+ ContainerKillEvent killEvent = (ContainerKillEvent) event;
+ container.diagnostics.append(killEvent.getDiagnostic()).append("\n");
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class ContainerKilledTransition implements
+ SingleArcTransition<ContainerImpl, ContainerEvent> {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ ContainerExitEvent exitEvent = (ContainerExitEvent) event;
+ container.exitCode = String.valueOf(exitEvent.getExitCode());
+
+ // The process/process-grp is killed. Decrement reference counts and
+ // cleanup resources
+ container.dispatcher.getEventHandler().handle(
+ new ContainerLocalizationEvent(
+ LocalizationEventType.CLEANUP_CONTAINER_RESOURCES, container));
+ }
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ static class ContainerDoneTransition implements
+ SingleArcTransition<ContainerImpl, ContainerEvent> {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ container.finished();
+ }
+ }
+
+ static class ContainerDiagnosticsUpdateTransition implements
+ SingleArcTransition<ContainerImpl, ContainerEvent> {
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ ContainerDiagnosticsUpdateEvent updateEvent =
+ (ContainerDiagnosticsUpdateEvent) event;
+ container.diagnostics.append(updateEvent.getDiagnosticsUpdate())
+ .append("\n");
+ }
+ }
+
+ @Override
+ public void handle(ContainerEvent event) {
+ try {
+ this.writeLock.lock();
+
+ ContainerId containerID = event.getContainerID();
+ LOG.info("Processing " + containerID + " of type " + event.getType());
+
+ ContainerState oldState = stateMachine.getCurrentState();
+ ContainerState newState = null;
+ try {
+ newState =
+ stateMachine.doTransition(event.getType(), event);
+ } catch (InvalidStateTransitonException e) {
+ LOG.warn("Can't handle this event at current state", e);
+ }
+ if (oldState != newState) {
+ LOG.info("Container " + containerID + " transitioned from "
+ + oldState
+ + " to " + newState);
+ }
+ } finally {
+ this.writeLock.unlock();
+ }
+ }
+
+ @Override
+ public String toString() {
+ this.readLock.lock();
+ try {
+ return ConverterUtils.toString(launchContext.getContainerId());
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+}
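
For readers tracing the flow end-to-end, here is a minimal sketch of how these
events drive the state machine above. The helper below is hypothetical and not
part of this patch (the Dispatcher and ContainerId would come from the
NodeManager context); the dispatcher is expected to route each ContainerEvent
to ContainerImpl.handle(), which runs stateMachine.doTransition() under the
write lock.

    // Hypothetical driver, for illustration only.
    static void initThenKill(Dispatcher dispatcher, ContainerId containerId) {
      // INIT_CONTAINER moves NEW -> LOCALIZING (or straight to LOCALIZED
      // when the launch context carries no local resources).
      dispatcher.getEventHandler().handle(new ContainerInitEvent(containerId));
      // KILL_CONTAINER sends a NEW container straight to DONE and diverts a
      // localizing or running one through KILLING, where the diagnostic
      // string is appended to the container's diagnostics.
      dispatcher.getEventHandler().handle(
          new ContainerKillEvent(containerId, "Killed by the ApplicationMaster"));
    }
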
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerInitEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerInitEvent.java
new file mode 100644
index 0000000..56421d9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerInitEvent.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class ContainerInitEvent extends ContainerEvent {
+
+ public ContainerInitEvent(ContainerId c) {
+ super(c, ContainerEventType.INIT_CONTAINER);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerKillEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerKillEvent.java
new file mode 100644
index 0000000..313b6a8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerKillEvent.java
@@ -0,0 +1,35 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class ContainerKillEvent extends ContainerEvent {
+
+ private final String diagnostic;
+
+ public ContainerKillEvent(ContainerId cID, String diagnostic) {
+ super(cID, ContainerEventType.KILL_CONTAINER);
+ this.diagnostic = diagnostic;
+ }
+
+ public String getDiagnostic() {
+ return this.diagnostic;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceEvent.java
new file mode 100644
index 0000000..2b20159
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceEvent.java
@@ -0,0 +1,37 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+
+public class ContainerResourceEvent extends ContainerEvent {
+
+ private final LocalResourceRequest rsrc;
+
+ public ContainerResourceEvent(ContainerId container,
+ ContainerEventType type, LocalResourceRequest rsrc) {
+ super(container, type);
+ this.rsrc = rsrc;
+ }
+
+ public LocalResourceRequest getResource() {
+ return rsrc;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceFailedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceFailedEvent.java
new file mode 100644
index 0000000..71e3bbc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceFailedEvent.java
@@ -0,0 +1,19 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+
+public class ContainerResourceFailedEvent extends ContainerResourceEvent {
+
+ private final Throwable exception;
+
+ public ContainerResourceFailedEvent(ContainerId container,
+ LocalResourceRequest rsrc, Throwable cause) {
+ super(container, ContainerEventType.RESOURCE_FAILED, rsrc);
+ this.exception = cause;
+ }
+
+ public Throwable getCause() {
+ return exception;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceLocalizedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceLocalizedEvent.java
new file mode 100644
index 0000000..4b742b1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceLocalizedEvent.java
@@ -0,0 +1,38 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+
+public class ContainerResourceLocalizedEvent extends ContainerResourceEvent {
+
+ private final Path loc;
+
+ public ContainerResourceLocalizedEvent(ContainerId container, LocalResourceRequest rsrc,
+ Path loc) {
+ super(container, ContainerEventType.RESOURCE_LOCALIZED, rsrc);
+ this.loc = loc;
+ }
+
+ public Path getLocation() {
+ return loc;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerState.java
new file mode 100644
index 0000000..a43df89
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerState.java
@@ -0,0 +1,25 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+public enum ContainerState {
+ NEW, LOCALIZING, LOCALIZATION_FAILED, LOCALIZED, RUNNING, EXITED_WITH_SUCCESS,
+ EXITED_WITH_FAILURE, KILLING, CONTAINER_CLEANEDUP_AFTER_KILL,
+ CONTAINER_RESOURCES_CLEANINGUP, DONE
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
new file mode 100644
index 0000000..bcd2115
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -0,0 +1,324 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
+
+import static org.apache.hadoop.fs.CreateFlag.CREATE;
+import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
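+// Runs a single container: expands variables, writes the launch script and
+// credential file into NM-private storage, then invokes the ContainerExecutor
+// and reports the outcome via container events.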
+public class ContainerLaunch implements Callable<Integer> {
+
+ private static final Log LOG = LogFactory.getLog(ContainerLaunch.class);
+
+ public static final String CONTAINER_SCRIPT = "task.sh";
+ public static final String FINAL_CONTAINER_TOKENS_FILE = "container_tokens";
+
+ private final Dispatcher dispatcher;
+ private final ContainerExecutor exec;
+ private final Application app;
+ private final Container container;
+ private final Configuration conf;
+ private final LocalDirAllocator logDirsSelector;
+
+ public ContainerLaunch(Configuration configuration, Dispatcher dispatcher,
+ ContainerExecutor exec, Application app, Container container) {
+ this.conf = configuration;
+ this.app = app;
+ this.exec = exec;
+ this.container = container;
+ this.dispatcher = dispatcher;
+ this.logDirsSelector = new LocalDirAllocator(NMConfig.NM_LOG_DIR);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ public Integer call() {
+ final ContainerLaunchContext launchContext = container.getLaunchContext();
+ final Map<Path,String> localResources = container.getLocalizedResources();
+ String containerIdStr = ConverterUtils.toString(container.getContainerID());
+ final String user = launchContext.getUser();
+ final Map<String,String> env = launchContext.getAllEnv();
+ final List<String> command = launchContext.getCommandList();
+ int ret = -1;
+
+ try {
+ // /////////////////////////// Variable expansion
+ // Before the container script gets written out.
+ List<String> newCmds = new ArrayList<String>(command.size());
+ String appIdStr = app.toString();
+ Path containerLogDir =
+ this.logDirsSelector.getLocalPathForWrite(appIdStr + Path.SEPARATOR
+ + containerIdStr, LocalDirAllocator.SIZE_UNKNOWN, this.conf);
+ for (String str : command) {
+ // TODO: Should we instead work via symlinks without this grammar?
+ newCmds.add(str.replace(ApplicationConstants.LOG_DIR_EXPANSION_VAR,
+ containerLogDir.toUri().getPath()));
+ }
+ launchContext.clearCommands();
+ launchContext.addAllCommands(newCmds);
+
+ Map<String, String> envs = launchContext.getAllEnv();
+ Map<String, String> newEnvs = new HashMap<String, String>(envs.size());
+ for (Entry<String, String> entry : envs.entrySet()) {
+ newEnvs.put(
+ entry.getKey(),
+ entry.getValue().replace(
+ ApplicationConstants.LOG_DIR_EXPANSION_VAR,
+ containerLogDir.toUri().getPath()));
+ }
+ launchContext.clearEnv();
+ launchContext.addAllEnv(newEnvs);
+ // /////////////////////////// End of variable expansion
+
+ FileContext lfs = FileContext.getLocalFSFileContext();
+ LocalDirAllocator lDirAllocator =
+ new LocalDirAllocator(NMConfig.NM_LOCAL_DIR); // TODO
+ Path nmPrivateContainerScriptPath =
+ lDirAllocator.getLocalPathForWrite(
+ ResourceLocalizationService.NM_PRIVATE_DIR + Path.SEPARATOR
+ + appIdStr + Path.SEPARATOR + containerIdStr
+ + Path.SEPARATOR + CONTAINER_SCRIPT, this.conf);
+ Path nmPrivateTokensPath =
+ lDirAllocator.getLocalPathForWrite(
+ ResourceLocalizationService.NM_PRIVATE_DIR
+ + Path.SEPARATOR
+ + containerIdStr
+ + Path.SEPARATOR
+ + String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT,
+ containerIdStr), this.conf);
+ DataOutputStream containerScriptOutStream = null;
+ DataOutputStream tokensOutStream = null;
+
+ // Select the working directory for the container
+ Path containerWorkDir =
+ lDirAllocator.getLocalPathForWrite(ContainerLocalizer.USERCACHE
+ + Path.SEPARATOR + user + Path.SEPARATOR
+ + ContainerLocalizer.APPCACHE + Path.SEPARATOR + appIdStr
+ + Path.SEPARATOR + containerIdStr,
+ LocalDirAllocator.SIZE_UNKNOWN, this.conf);
+ try {
+ // /////////// Write out the container-script in the nmPrivate space.
+ String[] localDirs =
+ this.conf.getStrings(NMConfig.NM_LOCAL_DIR,
+ NMConfig.DEFAULT_NM_LOCAL_DIR);
+ List<Path> appDirs = new ArrayList<Path>(localDirs.length);
+ for (String localDir : localDirs) {
+ Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
+ Path userdir = new Path(usersdir, user);
+ Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
+ appDirs.add(new Path(appsdir, appIdStr));
+ }
+ containerScriptOutStream =
+ lfs.create(nmPrivateContainerScriptPath,
+ EnumSet.of(CREATE, OVERWRITE));
+
+ // Set the token location too.
+ env.put(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME, new Path(
+ containerWorkDir, FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
+
+ writeLaunchEnv(containerScriptOutStream, env, localResources,
+ launchContext.getCommandList(), appDirs);
+ // /////////// End of writing out container-script
+
+ // /////////// Write out the container-tokens in the nmPrivate space.
+ tokensOutStream =
+ lfs.create(nmPrivateTokensPath, EnumSet.of(CREATE, OVERWRITE));
+ Credentials creds = container.getCredentials();
+ creds.writeTokenStorageToStream(tokensOutStream);
+ // /////////// End of writing out container-tokens
+ } finally {
+ IOUtils.cleanup(LOG, containerScriptOutStream, tokensOutStream);
+ }
+
+ // launchContainer() is a blocking call. Reaching this point means the
+ // container is about to be launched, so send out the CONTAINER_LAUNCHED
+ // event now.
+ dispatcher.getEventHandler().handle(new ContainerEvent(
+ container.getContainerID(),
+ ContainerEventType.CONTAINER_LAUNCHED));
+
+ ret =
+ exec.launchContainer(container, nmPrivateContainerScriptPath,
+ nmPrivateTokensPath, user, appIdStr, containerWorkDir);
+ } catch (Throwable e) {
+ LOG.warn("Failed to launch container", e);
+ dispatcher.getEventHandler().handle(new ContainerExitEvent(
+ launchContext.getContainerId(),
+ ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, ret));
+ return ret;
+ }
+
+ if (ret == ExitCode.KILLED.getExitCode()) {
+ // If the process was killed on request, send CONTAINER_KILLED_ON_REQUEST
+ // (which moves the container to CONTAINER_CLEANEDUP_AFTER_KILL) and
+ // return.
+ dispatcher.getEventHandler().handle(
+ new ContainerExitEvent(launchContext.getContainerId(),
+ ContainerEventType.CONTAINER_KILLED_ON_REQUEST, ret));
+ return ret;
+ }
+
+ if (ret != 0) {
+ LOG.warn("Container exited with a non-zero exit code " + ret);
+ this.dispatcher.getEventHandler().handle(new ContainerExitEvent(
+ launchContext.getContainerId(),
+ ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, ret));
+ return ret;
+ }
+
+ LOG.info("Container " + containerIdStr + " succeeded ");
+ dispatcher.getEventHandler().handle(
+ new ContainerEvent(launchContext.getContainerId(),
+ ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS));
+ return 0;
+ }
+
+ private static class ShellScriptBuilder {
+
+ private final StringBuilder sb;
+
+ public ShellScriptBuilder() {
+ this(new StringBuilder("#!/bin/bash\n\n"));
+ }
+
+ protected ShellScriptBuilder(StringBuilder sb) {
+ this.sb = sb;
+ }
+
+ public ShellScriptBuilder env(String key, String value) {
+ line("export ", key, "=\"", value, "\"");
+ return this;
+ }
+
+ public ShellScriptBuilder symlink(Path src, String dst) throws IOException {
+ return symlink(src, new Path(dst));
+ }
+
+ public ShellScriptBuilder symlink(Path src, Path dst) throws IOException {
+ if (!src.isAbsolute()) {
+ throw new IOException("Source must be absolute");
+ }
+ if (dst.isAbsolute()) {
+ throw new IOException("Destination must be relative");
+ }
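+ // If the link name has a directory component, create the parent
+ // directories first so the symlink can be placed inside them.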
+ if (dst.toUri().getPath().indexOf('/') != -1) {
+ line("mkdir -p ", dst.getParent().toString());
+ }
+ line("ln -sf ", src.toUri().getPath(), " ", dst.toString());
+ return this;
+ }
+
+ public void write(PrintStream out) throws IOException {
+ out.append(sb);
+ }
+
+ public void line(String... command) {
+ for (String s : command) {
+ sb.append(s);
+ }
+ sb.append("\n");
+ }
+
+ @Override
+ public String toString() {
+ return sb.toString();
+ }
+
+ }
+
+ private static void writeLaunchEnv(OutputStream out,
+ Map<String,String> environment, Map<Path,String> resources,
+ List<String> command, List<Path> appDirs)
+ throws IOException {
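+ // The emitted script is plain bash of roughly this shape (illustrative):
+ //   #!/bin/bash
+ //   export YARN_HOME=...                 (only when set in the NM's env)
+ //   export <ApplicationConstants.LOCAL_DIR_ENV>="<appDirs, comma-joined>"
+ //   export JVM_PID="$$"
+ //   export <each entry of 'environment'>
+ //   ln -sf <localized path> <link name>  (one line per resource)
+ //   exec setsid /bin/bash -c "<command> "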
+ ShellScriptBuilder sb = new ShellScriptBuilder();
+ if (System.getenv("YARN_HOME") != null) {
+ // TODO: Get from whitelist.
+ sb.env("YARN_HOME", System.getenv("YARN_HOME"));
+ }
+ sb.env(ApplicationConstants.LOCAL_DIR_ENV, StringUtils.join(",", appDirs));
+ if (!Shell.WINDOWS) {
+ sb.env("JVM_PID", "$$");
+ }
+ if (environment != null) {
+ for (Map.Entry<String,String> env : environment.entrySet()) {
+ sb.env(env.getKey().toString(), env.getValue().toString());
+ }
+ }
+ if (resources != null) {
+ for (Map.Entry<Path,String> link : resources.entrySet()) {
+ sb.symlink(link.getKey(), link.getValue());
+ }
+ }
+ ArrayList<String> cmd = new ArrayList<String>(2 * command.size() + 5);
+ cmd.add(ContainerExecutor.isSetsidAvailable ? "exec setsid " : "exec ");
+ cmd.add("/bin/bash ");
+ cmd.add("-c ");
+ cmd.add("\"");
+ for (String cs : command) {
+ cmd.add(cs.toString());
+ cmd.add(" ");
+ }
+ cmd.add("\"");
+ sb.line(cmd.toArray(new String[cmd.size()]));
+ PrintStream pout = null;
+ try {
+ pout = new PrintStream(out);
+ sb.write(pout);
+ } finally {
+ if (out != null) {
+ out.close();
+ }
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
new file mode 100644
index 0000000..ded9286
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
@@ -0,0 +1,143 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+/**
+ * The launcher for the containers. This service should be started only after
+ * the {@link ResourceLocalizationService} is started as it depends on creation
+ * of system directories on the local file-system.
+ *
+ */
+public class ContainersLauncher extends AbstractService
+ implements EventHandler<ContainersLauncherEvent> {
+
+ private static final Log LOG = LogFactory.getLog(ContainersLauncher.class);
+
+ private final Context context;
+ private final ContainerExecutor exec;
+ private final Dispatcher dispatcher;
+ private final ExecutorService containerLauncher =
+ Executors.newCachedThreadPool();
+ private final Map<ContainerId,RunningContainer> running =
+ Collections.synchronizedMap(new HashMap<ContainerId,RunningContainer>());
+
+ private static final class RunningContainer {
+ public RunningContainer(String user, Future<Integer> runningcontainer) {
+ this.user = user;
+ this.runningcontainer = runningcontainer;
+ }
+
+ String user;
+ Future<Integer> runningcontainer;
+ }
+
+
+ public ContainersLauncher(Context context, Dispatcher dispatcher,
+ ContainerExecutor exec) {
+ super("containers-launcher");
+ this.exec = exec;
+ this.context = context;
+ this.dispatcher = dispatcher;
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ try {
+ //TODO Is this required?
+ FileContext.getLocalFSFileContext(conf);
+ } catch (UnsupportedFileSystemException e) {
+ throw new YarnException("Failed to start ContainersLauncher", e);
+ }
+ super.init(conf);
+ }
+
+ @Override
+ public void stop() {
+ containerLauncher.shutdownNow();
+ super.stop();
+ }
+
+ @Override
+ public void handle(ContainersLauncherEvent event) {
+ // TODO: ContainersLauncher launches containers one by one!!
+ Container container = event.getContainer();
+ ContainerId containerId = container.getContainerID();
+ String userName = container.getUser();
+ switch (event.getType()) {
+ case LAUNCH_CONTAINER:
+ Application app =
+ context.getApplications().get(containerId.getAppId());
+ ContainerLaunch launch =
+ new ContainerLaunch(getConfig(), dispatcher, exec, app,
+ event.getContainer());
+ running.put(containerId,
+ new RunningContainer(userName,
+ containerLauncher.submit(launch)));
+ break;
+ case CLEANUP_CONTAINER:
+ RunningContainer rContainerDatum = running.remove(containerId);
+ Future<Integer> rContainer = rContainerDatum.runningcontainer;
+ if (rContainer != null) {
+
+ if (rContainer.isDone()) {
+ // The future is already done by this time.
+ break;
+ }
+
+ // Cancel the future so that the launch won't start if it hasn't already.
+ rContainer.cancel(false);
+
+ // Kill the container
+ String processId = exec.getProcessId(containerId);
+ if (processId != null) {
+ try {
+ exec.signalContainer(rContainerDatum.user,
+ processId, Signal.KILL);
+ } catch (IOException e) {
+ // The container process may already be gone; log and continue cleanup.
+ LOG.warn("Failed to signal container " + containerId, e);
+ }
+ }
+ }
+ break;
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEvent.java
new file mode 100644
index 0000000..38bedf2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEvent.java
@@ -0,0 +1,40 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+
+public class ContainersLauncherEvent
+ extends AbstractEvent<ContainersLauncherEventType>{
+
+ private final Container container;
+
+ public ContainersLauncherEvent(Container container,
+ ContainersLauncherEventType eventType) {
+ super(eventType);
+ this.container = container;
+ }
+
+ public Container getContainer() {
+ return container;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java
new file mode 100644
index 0000000..6793bf7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java
@@ -0,0 +1,24 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
+
+public enum ContainersLauncherEventType {
+ LAUNCH_CONTAINER,
+ CLEANUP_CONTAINER, // The process(grp) itself.
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
new file mode 100644
index 0000000..9dae769
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -0,0 +1,366 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerSecurityInfo;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenIdentifier;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenSecretManager;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class ContainerLocalizer {
+
+ static final Log LOG = LogFactory.getLog(ContainerLocalizer.class);
+
+ public static final String FILECACHE = "filecache";
+ public static final String APPCACHE = "appcache";
+ public static final String USERCACHE = "usercache";
+ public static final String OUTPUTDIR = "output";
+ public static final String TOKEN_FILE_NAME_FMT = "%s.tokens";
+ public static final String WORKDIR = "work";
+ private static final String APPCACHE_CTXT_FMT = "%s.app.cache.dirs";
+ private static final String USERCACHE_CTXT_FMT = "%s.user.cache.dirs";
+
+ private final String user;
+ private final String appId;
+ private final List<Path> localDirs;
+ private final String localizerId;
+ private final FileContext lfs;
+ private final Configuration conf;
+ private final LocalDirAllocator appDirs;
+ private final LocalDirAllocator userDirs;
+ private final RecordFactory recordFactory;
+ private final Map<LocalResource,Future<Path>> pendingResources;
+
+ public ContainerLocalizer(FileContext lfs, String user, String appId,
+ String localizerId, List<Path> localDirs,
+ RecordFactory recordFactory) throws IOException {
+ if (null == user) {
+ throw new IOException("Cannot initialize for null user");
+ }
+ if (null == localizerId) {
+ throw new IOException("Cannot initialize for null containerId");
+ }
+ this.lfs = lfs;
+ this.user = user;
+ this.appId = appId;
+ this.localDirs = localDirs;
+ this.localizerId = localizerId;
+ this.recordFactory = recordFactory;
+ this.conf = new Configuration();
+ this.appDirs =
+ new LocalDirAllocator(String.format(APPCACHE_CTXT_FMT, appId));
+ this.userDirs =
+ new LocalDirAllocator(String.format(USERCACHE_CTXT_FMT, appId));
+ this.pendingResources = new HashMap<LocalResource,Future<Path>>();
+ }
+
+ LocalizationProtocol getProxy(final InetSocketAddress nmAddr) {
+ Configuration localizerConf = new Configuration();
+ YarnRPC rpc = YarnRPC.create(localizerConf);
+ if (UserGroupInformation.isSecurityEnabled()) {
+ localizerConf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ LocalizerSecurityInfo.class, SecurityInfo.class);
+ }
+ return (LocalizationProtocol)
+ rpc.getProxy(LocalizationProtocol.class, nmAddr, localizerConf);
+ }
+
+ public int runLocalization(final InetSocketAddress nmAddr)
+ throws IOException, InterruptedException {
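+ // Sequence: load the credentials written by the NM, build a localizer
+ // token for the localization RPC, then heartbeat-and-download until told
+ // to DIE.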
+ // load credentials
+ initDirs(conf, user, appId, lfs, localDirs);
+ final Credentials creds = new Credentials();
+ DataInputStream credFile = null;
+ try {
+ // assume credentials in cwd
+ // TODO: Fix
+ credFile = lfs.open(
+ new Path(String.format(TOKEN_FILE_NAME_FMT, localizerId)));
+ creds.readTokenStorageStream(credFile);
+ } finally {
+ if (credFile != null) {
+ credFile.close();
+ }
+ }
+ // create localizer context
+ UserGroupInformation remoteUser =
+ UserGroupInformation.createRemoteUser(user);
+ LocalizerTokenSecretManager secretManager =
+ new LocalizerTokenSecretManager();
+ LocalizerTokenIdentifier id = secretManager.createIdentifier();
+ Token<LocalizerTokenIdentifier> localizerToken =
+ new Token<LocalizerTokenIdentifier>(id, secretManager);
+ remoteUser.addToken(localizerToken);
+ final LocalizationProtocol nodeManager =
+ remoteUser.doAs(new PrivilegedAction<LocalizationProtocol>() {
+ @Override
+ public LocalizationProtocol run() {
+ return getProxy(nmAddr);
+ }
+ });
+
+ // create user context
+ UserGroupInformation ugi =
+ UserGroupInformation.createRemoteUser(user);
+ for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) {
+ ugi.addToken(token);
+ }
+
+ ExecutorService exec = null;
+ try {
+ exec = createDownloadThreadPool();
+ localizeFiles(nodeManager, exec, ugi);
+ return 0;
+ } catch (Throwable e) {
+ // Print traces to stdout so that they can be captured and logged from
+ // the NM's address space.
+ e.printStackTrace(System.out);
+ return -1;
+ } finally {
+ if (exec != null) {
+ exec.shutdownNow();
+ }
+ }
+ }
+
+ ExecutorService createDownloadThreadPool() {
+ return Executors.newSingleThreadExecutor();
+ }
+
+ Callable<Path> download(LocalDirAllocator lda, LocalResource rsrc,
+ UserGroupInformation ugi) {
+ return new FSDownload(lfs, ugi, conf, lda, rsrc, new Random());
+ }
+
+ void sleep(int duration) throws InterruptedException {
+ TimeUnit.SECONDS.sleep(duration);
+ }
+
+ private void localizeFiles(LocalizationProtocol nodemanager, ExecutorService exec,
+ UserGroupInformation ugi) {
+ while (true) {
+ try {
+ LocalizerStatus status = createStatus();
+ LocalizerHeartbeatResponse response = nodemanager.heartbeat(status);
+ switch (response.getLocalizerAction()) {
+ case LIVE:
+ List<LocalResource> newResources = response.getAllResources();
+ for (LocalResource r : newResources) {
+ if (!pendingResources.containsKey(r)) {
+ final LocalDirAllocator lda;
+ switch (r.getVisibility()) {
+ default:
+ LOG.warn("Unknown visibility: " + r.getVisibility()
+ + ", Using userDirs");
+ //Falling back to userDirs for unknown visibility.
+ case PUBLIC:
+ case PRIVATE:
+ lda = userDirs;
+ break;
+ case APPLICATION:
+ lda = appDirs;
+ break;
+ }
+ // TODO: Synchronization??
+ pendingResources.put(r, exec.submit(download(lda, r, ugi)));
+ }
+ }
+ break;
+ case DIE:
+          // kill all running localizations
+ for (Future<Path> pending : pendingResources.values()) {
+ pending.cancel(true);
+ }
+ status = createStatus();
+ // ignore response
+ try {
+ nodemanager.heartbeat(status);
+ } catch (YarnRemoteException e) { }
+ return;
+ }
+ // TODO HB immediately when rsrc localized
+ sleep(1);
+ } catch (InterruptedException e) {
+ return;
+ } catch (YarnRemoteException e) {
+ // TODO cleanup
+ return;
+ }
+ }
+ }
+
+ /**
+   * Create the payload for the heartbeat: mainly the list of
+   * {@link LocalResourceStatus}es.
+ *
+ * @return a {@link LocalizerStatus} that can be sent via heartbeat.
+ * @throws InterruptedException
+ */
+ private LocalizerStatus createStatus() throws InterruptedException {
+ final List<LocalResourceStatus> currentResources =
+ new ArrayList<LocalResourceStatus>();
+ // TODO: Synchronization??
+ for (Iterator<LocalResource> i = pendingResources.keySet().iterator();
+ i.hasNext();) {
+ LocalResource rsrc = i.next();
+ LocalResourceStatus stat =
+ recordFactory.newRecordInstance(LocalResourceStatus.class);
+ stat.setResource(rsrc);
+ Future<Path> fPath = pendingResources.get(rsrc);
+ if (fPath.isDone()) {
+ try {
+ Path localPath = fPath.get();
+ stat.setLocalPath(
+ ConverterUtils.getYarnUrlFromPath(localPath));
+ stat.setLocalSize(
+ FileUtil.getDU(new File(localPath.getParent().toString())));
+ stat.setStatus(ResourceStatusType.FETCH_SUCCESS);
+ } catch (ExecutionException e) {
+ stat.setStatus(ResourceStatusType.FETCH_FAILURE);
+ stat.setException(RPCUtil.getRemoteException(e.getCause()));
+ } catch (CancellationException e) {
+ stat.setStatus(ResourceStatusType.FETCH_FAILURE);
+ stat.setException(RPCUtil.getRemoteException(e));
+ }
+ // TODO shouldn't remove until ACK
+ i.remove();
+ } else {
+ stat.setStatus(ResourceStatusType.FETCH_PENDING);
+ }
+ currentResources.add(stat);
+ }
+ LocalizerStatus status =
+ recordFactory.newRecordInstance(LocalizerStatus.class);
+ status.setLocalizerId(localizerId);
+ status.addAllResources(currentResources);
+ return status;
+ }
+
+ public static void main(String[] argv) throws Throwable {
+    // usage: $0 user appId locId host port local_dir [local_dir]*
+ // let $x = $x/usercache for $local.dir
+ // MKDIR $x/$user/appcache/$appid
+ // MKDIR $x/$user/appcache/$appid/output
+ // MKDIR $x/$user/appcache/$appid/filecache
+ // LOAD $x/$user/appcache/$appid/appTokens
+ try {
+ String user = argv[0];
+ String appId = argv[1];
+ String locId = argv[2];
+ InetSocketAddress nmAddr =
+ new InetSocketAddress(argv[3], Integer.parseInt(argv[4]));
+ String[] sLocaldirs = Arrays.copyOfRange(argv, 5, argv.length);
+ ArrayList<Path> localDirs = new ArrayList<Path>(sLocaldirs.length);
+ for (String sLocaldir : sLocaldirs) {
+ localDirs.add(new Path(sLocaldir));
+ }
+
+ final String uid =
+ UserGroupInformation.getCurrentUser().getShortUserName();
+ if (!user.equals(uid)) {
+ // TODO: fail localization
+ LOG.warn("Localization running as " + uid + " not " + user);
+ }
+
+ ContainerLocalizer localizer =
+ new ContainerLocalizer(FileContext.getLocalFSFileContext(), user,
+ appId, locId, localDirs,
+ RecordFactoryProvider.getRecordFactory(null));
+ System.exit(localizer.runLocalization(nmAddr));
+ } catch (Throwable e) {
+ // Print error to stdout so that LCE can use it.
+ e.printStackTrace(System.out);
+ throw e;
+ }
+ }
+
+ private static void initDirs(Configuration conf, String user, String appId,
+ FileContext lfs, List<Path> localDirs) throws IOException {
+ if (null == localDirs || 0 == localDirs.size()) {
+ throw new IOException("Cannot initialize without local dirs");
+ }
+ String[] appsFileCacheDirs = new String[localDirs.size()];
+ String[] usersFileCacheDirs = new String[localDirs.size()];
+ for (int i = 0, n = localDirs.size(); i < n; ++i) {
+ // $x/usercache/$user
+ Path base = lfs.makeQualified(
+ new Path(new Path(localDirs.get(i), USERCACHE), user));
+ // $x/usercache/$user/filecache
+ Path userFileCacheDir = new Path(base, FILECACHE);
+ usersFileCacheDirs[i] = userFileCacheDir.toString();
+ lfs.mkdir(userFileCacheDir, null, false);
+ // $x/usercache/$user/appcache/$appId
+ Path appBase = new Path(base, new Path(APPCACHE, appId));
+ // $x/usercache/$user/appcache/$appId/filecache
+ Path appFileCacheDir = new Path(appBase, FILECACHE);
+ appsFileCacheDirs[i] = appFileCacheDir.toString();
+ lfs.mkdir(appFileCacheDir, null, false);
+ // $x/usercache/$user/appcache/$appId/output
+ lfs.mkdir(new Path(appBase, OUTPUTDIR), null, false);
+ }
+ conf.setStrings(String.format(APPCACHE_CTXT_FMT, appId), appsFileCacheDirs);
+ conf.setStrings(String.format(USERCACHE_CTXT_FMT, appId), usersFileCacheDirs);
+ }
+
+}
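
The argv contract in main() above is easiest to see with a concrete invocation. A minimal sketch, assuming only the classes added in this patch are on the classpath; the user, application/container IDs, NM address, and local dir below are all hypothetical. Note that everything after the port argument is treated as a local dir.

    // Hypothetical launcher; all argument values are illustrative.
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;

    public class ContainerLocalizerLaunchSketch {
      public static void main(String[] args) throws Throwable {
        ContainerLocalizer.main(new String[] {
            "alice",                                   // user to localize as
            "application_1314000000000_0001",         // appId
            "container_1314000000000_0001_01_000002", // localizerId
            "nm-host.example.com", "4344",            // NM localizer address
            "/tmp/nm-local-dir"                       // one or more local dirs
        });
      }
    }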
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FSDownload.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FSDownload.java
new file mode 100644
index 0000000..671f3ae
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FSDownload.java
@@ -0,0 +1,184 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Random;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.RunJar;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+/**
+ * Download a single URL to the local disk.
+ *
+ */
+public class FSDownload implements Callable<Path> {
+
+ private static final Log LOG = LogFactory.getLog(FSDownload.class);
+
+ private Random rand;
+ private FileContext files;
+ private final UserGroupInformation userUgi;
+ private Configuration conf;
+ private LocalResource resource;
+ private LocalDirAllocator dirs;
+ private FsPermission cachePerms = new FsPermission((short) 0755);
+
+ FSDownload(FileContext files, UserGroupInformation ugi, Configuration conf,
+ LocalDirAllocator dirs, LocalResource resource, Random rand) {
+ this.conf = conf;
+ this.dirs = dirs;
+ this.files = files;
+ this.userUgi = ugi;
+ this.resource = resource;
+ this.rand = rand;
+ }
+
+ LocalResource getResource() {
+ return resource;
+ }
+
+ private Path copy(Path sCopy, Path dstdir) throws IOException {
+ FileSystem sourceFs = sCopy.getFileSystem(conf);
+ Path dCopy = new Path(dstdir, sCopy.getName() + ".tmp");
+ FileStatus sStat = sourceFs.getFileStatus(sCopy);
+ if (sStat.getModificationTime() != resource.getTimestamp()) {
+ throw new IOException("Resource " + sCopy +
+ " changed on src filesystem (expected " + resource.getTimestamp() +
+ ", was " + sStat.getModificationTime());
+ }
+
+ sourceFs.copyToLocalFile(sCopy, dCopy);
+ return dCopy;
+ }
+
+ private long unpack(File localrsrc, File dst) throws IOException {
+ switch (resource.getType()) {
+ case ARCHIVE:
+ String lowerDst = dst.getName().toLowerCase();
+ if (lowerDst.endsWith(".jar")) {
+ RunJar.unJar(localrsrc, dst);
+ } else if (lowerDst.endsWith(".zip")) {
+ FileUtil.unZip(localrsrc, dst);
+ } else if (lowerDst.endsWith(".tar.gz") ||
+ lowerDst.endsWith(".tgz") ||
+ lowerDst.endsWith(".tar")) {
+ FileUtil.unTar(localrsrc, dst);
+ } else {
+ LOG.warn("Cannot unpack " + localrsrc);
+ if (!localrsrc.renameTo(dst)) {
+ throw new IOException("Unable to rename file: [" + localrsrc
+ + "] to [" + dst + "]");
+ }
+ }
+ break;
+ case FILE:
+ default:
+ if (!localrsrc.renameTo(dst)) {
+ throw new IOException("Unable to rename file: [" + localrsrc
+ + "] to [" + dst + "]");
+ }
+ break;
+ }
+ return 0;
+ // TODO Should calculate here before returning
+ //return FileUtil.getDU(destDir);
+ }
+
+ @Override
+ public Path call() throws Exception {
+ final Path sCopy;
+ try {
+ sCopy = ConverterUtils.getPathFromYarnURL(resource.getResource());
+ } catch (URISyntaxException e) {
+ throw new IOException("Invalid resource", e);
+ }
+
+ Path tmp;
+ Path dst =
+ dirs.getLocalPathForWrite(".", getEstimatedSize(resource),
+ conf);
+ do {
+ tmp = new Path(dst, String.valueOf(rand.nextLong()));
+ } while (files.util().exists(tmp));
+ dst = tmp;
+ files.mkdir(dst, cachePerms, false);
+ final Path dst_work = new Path(dst + "_tmp");
+ files.mkdir(dst_work, cachePerms, false);
+
+ Path dFinal = files.makeQualified(new Path(dst_work, sCopy.getName()));
+ try {
+ Path dTmp = null == userUgi
+ ? files.makeQualified(copy(sCopy, dst_work))
+ : userUgi.doAs(new PrivilegedExceptionAction<Path>() {
+ public Path run() throws Exception {
+ return files.makeQualified(copy(sCopy, dst_work));
+            }
+ });
+ unpack(new File(dTmp.toUri()), new File(dFinal.toUri()));
+ files.rename(dst_work, dst, Rename.OVERWRITE);
+ } catch (Exception e) {
+ try { files.delete(dst, true); } catch (IOException ignore) { }
+ throw e;
+ } finally {
+ try {
+ files.delete(dst_work, true);
+ } catch (FileNotFoundException ignore) { }
+ // clear ref to internal var
+ rand = null;
+ conf = null;
+ resource = null;
+ dirs = null;
+ cachePerms = null;
+ }
+ return files.makeQualified(new Path(dst, sCopy.getName()));
+ }
+
+ private static long getEstimatedSize(LocalResource rsrc) {
+ if (rsrc.getSize() < 0) {
+ return -1;
+ }
+ switch (rsrc.getType()) {
+ case ARCHIVE:
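+      // Rough heuristic: assume an archive expands ~5x when unpacked.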
+ return 5 * rsrc.getSize();
+ case FILE:
+ default:
+ return rsrc.getSize();
+ }
+ }
+
+}
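
FSDownload's constructor is package-private, so callers sit in this package and submit the task to an executor, waiting on the returned Future for the local path. A minimal same-package sketch of that pattern: the source path and allocator context key are hypothetical, and the null UGI mirrors the public-localizer case where the copy runs as the current user.

    package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;

    import java.util.Random;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.api.records.LocalResource;
    import org.apache.hadoop.yarn.api.records.LocalResourceType;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
    import org.apache.hadoop.yarn.util.ConverterUtils;

    public class FSDownloadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileContext lfs = FileContext.getLocalFSFileContext();

        // Target dirs for the allocator; the context key is arbitrary.
        conf.setStrings("sketch.cache.dirs", "/tmp/sketch-cache");
        LocalDirAllocator dirs = new LocalDirAllocator("sketch.cache.dirs");

        // Describe the resource. The timestamp must match the source file's
        // modification time or copy() rejects the download.
        Path src = new Path("/tmp/input/job.jar");
        FileStatus stat = src.getFileSystem(conf).getFileStatus(src);
        LocalResource rsrc = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(LocalResource.class);
        rsrc.setResource(ConverterUtils.getYarnUrlFromPath(src));
        rsrc.setTimestamp(stat.getModificationTime());
        rsrc.setSize(stat.getLen());
        rsrc.setType(LocalResourceType.FILE);

        // null ugi: the copy runs as the current user.
        ExecutorService exec = Executors.newSingleThreadExecutor();
        Future<Path> local =
            exec.submit(new FSDownload(lfs, null, conf, dirs, rsrc, new Random()));
        System.out.println("Localized to " + local.get());
        exec.shutdown();
      }
    }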
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java
new file mode 100644
index 0000000..35f2485
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java
@@ -0,0 +1,154 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class LocalResourceRequest
+ implements LocalResource, Comparable<LocalResourceRequest> {
+
+ private final Path loc;
+ private final long timestamp;
+ private final LocalResourceType type;
+
+ /**
+ * Wrap API resource to match against cache of localized resources.
+ * @param resource Resource requested by container
+ * @throws URISyntaxException If the path is malformed
+ */
+ public LocalResourceRequest(LocalResource resource)
+ throws URISyntaxException {
+ this(ConverterUtils.getPathFromYarnURL(resource.getResource()),
+ resource.getTimestamp(),
+ resource.getType());
+ }
+
+ LocalResourceRequest(Path loc, long timestamp, LocalResourceType type) {
+ this.loc = loc;
+ this.timestamp = timestamp;
+ this.type = type;
+ }
+
+ @Override
+ public int hashCode() {
+ return loc.hashCode() ^
+ (int)((timestamp >>> 32) ^ timestamp) *
+ type.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (!(o instanceof LocalResourceRequest)) {
+ return false;
+ }
+ final LocalResourceRequest other = (LocalResourceRequest) o;
+ return getPath().equals(other.getPath()) &&
+ getTimestamp() == other.getTimestamp() &&
+ getType() == other.getType();
+ }
+
+ @Override
+ public int compareTo(LocalResourceRequest other) {
+ if (this == other) {
+ return 0;
+ }
+ int ret = getPath().compareTo(other.getPath());
+ if (0 == ret) {
+      // compare the longs directly: casting (long - long) to int can
+      // overflow and corrupt the sign
+      ret = getTimestamp() < other.getTimestamp() ? -1
+          : (getTimestamp() > other.getTimestamp() ? 1 : 0);
+ if (0 == ret) {
+ ret = getType().ordinal() - other.getType().ordinal();
+ }
+ }
+ return ret;
+ }
+
+ public Path getPath() {
+ return loc;
+ }
+
+ @Override
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ @Override
+ public LocalResourceType getType() {
+ return type;
+ }
+
+ @Override
+ public URL getResource() {
+ return ConverterUtils.getYarnUrlFromPath(loc);
+ }
+
+ @Override
+ public long getSize() {
+ return -1L;
+ }
+
+ @Override
+ public LocalResourceVisibility getVisibility() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setResource(URL resource) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setSize(long size) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setTimestamp(long timestamp) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setType(LocalResourceType type) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setVisibility(LocalResourceVisibility visibility) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ ");
+ sb.append(getPath().toString()).append(", ");
+ sb.append(getTimestamp()).append(", ");
+ sb.append(getType()).append(" }");
+ return sb.toString();
+ }
+}
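
Worth noting what this key deliberately ignores: size (always -1) and visibility (unsupported), so requests for the same path, timestamp, and type collapse to a single cache entry no matter which container asks. A small same-package sketch with a hypothetical HDFS path:

    package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.api.records.LocalResourceType;

    public class LocalResourceRequestKeySketch {
      public static void main(String[] args) {
        // Same path, timestamp, and type => same cache key, regardless of
        // who requested the resource or with what visibility.
        LocalResourceRequest a = new LocalResourceRequest(
            new Path("hdfs://nn:8020/jobs/job.jar"), 1314000000000L,
            LocalResourceType.FILE);
        LocalResourceRequest b = new LocalResourceRequest(
            new Path("hdfs://nn:8020/jobs/job.jar"), 1314000000000L,
            LocalResourceType.FILE);
        System.out.println(a.equals(b));                  // true
        System.out.println(a.hashCode() == b.hashCode()); // true
        System.out.println(a.compareTo(b));               // 0
        System.out.println(a); // { hdfs://nn:8020/jobs/job.jar, 1314000000000, FILE }
      }
    }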
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTracker.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTracker.java
new file mode 100644
index 0000000..b24d8af
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTracker.java
@@ -0,0 +1,40 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent;
+
+/**
+ * Component tracking resources that all share the same
+ * {@link LocalResourceVisibility}.
+ *
+ */
+interface LocalResourcesTracker
+ extends EventHandler<ResourceEvent>, Iterable<LocalizedResource> {
+
+ // TODO: Not used at all!!
+ boolean contains(LocalResourceRequest resource);
+
+ boolean remove(LocalizedResource req, DeletionService delService);
+
+ String getUser();
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
new file mode 100644
index 0000000..283c6d4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
@@ -0,0 +1,116 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.util.Iterator;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent;
+
+/**
+ * A collection of {@link LocalizedResource}s, all of the same
+ * {@link LocalResourceVisibility}.
+ *
+ */
+class LocalResourcesTrackerImpl implements LocalResourcesTracker {
+
+ static final Log LOG = LogFactory.getLog(LocalResourcesTrackerImpl.class);
+
+ private final String user;
+ private final Dispatcher dispatcher;
+ private final ConcurrentMap<LocalResourceRequest,LocalizedResource> localrsrc;
+
+ public LocalResourcesTrackerImpl(String user, Dispatcher dispatcher) {
+ this(user, dispatcher,
+ new ConcurrentHashMap<LocalResourceRequest,LocalizedResource>());
+ }
+
+ LocalResourcesTrackerImpl(String user, Dispatcher dispatcher,
+ ConcurrentMap<LocalResourceRequest,LocalizedResource> localrsrc) {
+ this.user = user;
+ this.dispatcher = dispatcher;
+ this.localrsrc = localrsrc;
+ }
+
+ @Override
+ public void handle(ResourceEvent event) {
+ LocalResourceRequest req = event.getLocalResourceRequest();
+ LocalizedResource rsrc = localrsrc.get(req);
+ switch (event.getType()) {
+ case REQUEST:
+ case LOCALIZED:
+ if (null == rsrc) {
+ rsrc = new LocalizedResource(req, dispatcher);
+ localrsrc.put(req, rsrc);
+ }
+ break;
+ case RELEASE:
+ if (null == rsrc) {
+ LOG.info("Release unknown rsrc null (discard)");
+ return;
+ }
+ break;
+ }
+ rsrc.handle(event);
+ }
+
+ @Override
+ public boolean contains(LocalResourceRequest resource) {
+ return localrsrc.containsKey(resource);
+ }
+
+ @Override
+ public boolean remove(LocalizedResource rem, DeletionService delService) {
+ // current synchronization guaranteed by crude RLS event for cleanup
+ LocalizedResource rsrc = localrsrc.remove(rem.getRequest());
+ if (null == rsrc) {
+ LOG.error("Attempt to remove absent resource: " + rem.getRequest() +
+ " from " + getUser());
+ return true;
+ }
+ if (rsrc.getRefCount() > 0
+ || ResourceState.DOWNLOADING.equals(rsrc.getState())
+ || rsrc != rem) {
+ // internal error
+ LOG.error("Attempt to remove resource with non-zero refcount");
+ assert false;
+ return false;
+ }
+ if (ResourceState.LOCALIZED.equals(rsrc.getState())) {
+ delService.delete(getUser(), rsrc.getLocalPath());
+ }
+ return true;
+ }
+
+ @Override
+ public String getUser() {
+ return user;
+ }
+
+ @Override
+ public Iterator<LocalizedResource> iterator() {
+ return localrsrc.values().iterator();
+ }
+
+}
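
How a tracker is fed end to end: a REQUEST for an unknown LocalResourceRequest registers a new LocalizedResource and forwards the event to its state machine, which asks a localizer to fetch the resource. A same-package sketch under stated assumptions: it uses the AsyncDispatcher from yarn-common and the event constructors as they appear elsewhere in this patch, registers a no-op handler so the dispatched LocalizerResourceRequestEvent has somewhere to go, and all names and paths are hypothetical.

    package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.LocalResourceType;
    import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
    import org.apache.hadoop.yarn.event.AsyncDispatcher;
    import org.apache.hadoop.yarn.event.EventHandler;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRequestEvent;

    public class TrackerSketch {
      public static void main(String[] args) {
        AsyncDispatcher dispatcher = new AsyncDispatcher();
        dispatcher.init(new Configuration());
        dispatcher.start();
        // No-op sink for the LocalizerResourceRequestEvent the REQUEST triggers.
        dispatcher.register(LocalizerEventType.class,
            new EventHandler<LocalizerEvent>() {
              @Override
              public void handle(LocalizerEvent event) { }
            });

        LocalResourcesTrackerImpl tracker =
            new LocalResourcesTrackerImpl("alice", dispatcher);
        LocalResourceRequest req = new LocalResourceRequest(
            new Path("hdfs://nn:8020/jobs/job.jar"), 1314000000000L,
            LocalResourceType.FILE);
        ContainerId container = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(ContainerId.class);
        LocalizerContext ctxt = new LocalizerContext("alice", container, null);

        // First REQUEST creates the LocalizedResource and starts localization.
        tracker.handle(new ResourceRequestEvent(
            req, LocalResourceVisibility.PRIVATE, ctxt));
        System.out.println(tracker.contains(req)); // true
        dispatcher.stop();
      }
    }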
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
new file mode 100644
index 0000000..2ba25bf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
@@ -0,0 +1,288 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.util.EnumSet;
+import java.util.LinkedList;
+import java.util.Queue;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceLocalizedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceLocalizedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceReleaseEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRequestEvent;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+
+/**
+ * Datum representing a localized resource. Holds the state machine of a
+ * resource; its state is one of {@link ResourceState}.
+ *
+ */
+public class LocalizedResource implements EventHandler<ResourceEvent> {
+
+ private static final Log LOG = LogFactory.getLog(LocalizedResource.class);
+
+ Path localPath;
+ long size = -1;
+ final LocalResourceRequest rsrc;
+ final Dispatcher dispatcher;
+ final StateMachine<ResourceState,ResourceEventType,ResourceEvent>
+ stateMachine;
+ final Semaphore sem = new Semaphore(1);
+ final Queue<ContainerId> ref; // Queue of containers using this localized
+ // resource
+ private final Lock readLock;
+ private final Lock writeLock;
+
+ final AtomicLong timestamp = new AtomicLong(currentTime());
+
+ private static final StateMachineFactory<LocalizedResource,ResourceState,
+ ResourceEventType,ResourceEvent> stateMachineFactory =
+ new StateMachineFactory<LocalizedResource,ResourceState,
+ ResourceEventType,ResourceEvent>(ResourceState.INIT)
+
+ // From INIT (ref == 0, awaiting req)
+ .addTransition(ResourceState.INIT, ResourceState.DOWNLOADING,
+ ResourceEventType.REQUEST, new FetchResourceTransition())
+ .addTransition(ResourceState.INIT, ResourceState.LOCALIZED,
+ ResourceEventType.LOCALIZED, new FetchDirectTransition())
+ .addTransition(ResourceState.INIT, ResourceState.INIT,
+ ResourceEventType.RELEASE, new ReleaseTransition())
+
+ // From DOWNLOADING (ref > 0, may be localizing)
+ .addTransition(ResourceState.DOWNLOADING, ResourceState.DOWNLOADING,
+ ResourceEventType.REQUEST, new FetchResourceTransition()) // TODO: Duplicate addition!!
+ .addTransition(ResourceState.DOWNLOADING, ResourceState.LOCALIZED,
+ ResourceEventType.LOCALIZED, new FetchSuccessTransition())
+ .addTransition(ResourceState.DOWNLOADING,
+ EnumSet.of(ResourceState.DOWNLOADING, ResourceState.INIT),
+ ResourceEventType.RELEASE, new ReleasePendingTransition())
+
+ // From LOCALIZED (ref >= 0, on disk)
+ .addTransition(ResourceState.LOCALIZED, ResourceState.LOCALIZED,
+ ResourceEventType.REQUEST, new LocalizedResourceTransition())
+ .addTransition(ResourceState.LOCALIZED, ResourceState.LOCALIZED,
+ ResourceEventType.LOCALIZED)
+ .addTransition(ResourceState.LOCALIZED, ResourceState.LOCALIZED,
+ ResourceEventType.RELEASE, new ReleaseTransition())
+ .installTopology();
+
+ public LocalizedResource(LocalResourceRequest rsrc, Dispatcher dispatcher) {
+ this.rsrc = rsrc;
+ this.dispatcher = dispatcher;
+ this.ref = new LinkedList<ContainerId>();
+
+ ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+ this.readLock = readWriteLock.readLock();
+ this.writeLock = readWriteLock.writeLock();
+
+ this.stateMachine = stateMachineFactory.make(this);
+ }
+
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ ").append(rsrc.toString()).append(",")
+ .append(getState() == ResourceState.LOCALIZED
+ ? getLocalPath() + "," + getSize()
+ : "pending").append(",[");
+ for (ContainerId c : ref) {
+ sb.append("(").append(c.toString()).append(")");
+ }
+ sb.append("],").append(getTimestamp()).append("}");
+ return sb.toString();
+ }
+
+ private void release(ContainerId container) {
+ if (!ref.remove(container)) {
+ LOG.info("Attempt to release claim on " + this +
+ " from unregistered container " + container);
+ assert false; // TODO: FIX
+ }
+ timestamp.set(currentTime());
+ }
+
+ private long currentTime() {
+ return System.nanoTime();
+ }
+
+ public ResourceState getState() {
+ this.readLock.lock();
+ try {
+ return stateMachine.getCurrentState();
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ public LocalResourceRequest getRequest() {
+ return rsrc;
+ }
+
+ public Path getLocalPath() {
+ return localPath;
+ }
+
+ public long getTimestamp() {
+ return timestamp.get();
+ }
+
+ public long getSize() {
+ return size;
+ }
+
+ public int getRefCount() {
+ return ref.size();
+ }
+
+ public boolean tryAcquire() {
+ return sem.tryAcquire();
+ }
+
+ public void unlock() {
+ sem.release();
+ }
+
+ @Override
+ public void handle(ResourceEvent event) {
+ try {
+ this.writeLock.lock();
+
+ Path resourcePath = event.getLocalResourceRequest().getPath();
+ LOG.info("Processing " + resourcePath + " of type " + event.getType());
+
+ ResourceState oldState = this.stateMachine.getCurrentState();
+ ResourceState newState = null;
+ try {
+ newState = this.stateMachine.doTransition(event.getType(), event);
+ } catch (InvalidStateTransitonException e) {
+ LOG.warn("Can't handle this event at current state", e);
+ }
+ if (oldState != newState) {
+ LOG.info("Resource " + resourcePath + " transitioned from "
+ + oldState
+ + " to " + newState);
+ }
+ } finally {
+ this.writeLock.unlock();
+ }
+ }
+
+ static abstract class ResourceTransition implements
+ SingleArcTransition<LocalizedResource,ResourceEvent> {
+ // typedef
+ }
+
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ private static class FetchResourceTransition extends ResourceTransition {
+ @Override
+ public void transition(LocalizedResource rsrc, ResourceEvent event) {
+ ResourceRequestEvent req = (ResourceRequestEvent) event;
+ LocalizerContext ctxt = req.getContext();
+ ContainerId container = ctxt.getContainerId();
+ rsrc.ref.add(container);
+ rsrc.dispatcher.getEventHandler().handle(
+ new LocalizerResourceRequestEvent(rsrc, req.getVisibility(), ctxt));
+ }
+ }
+
+ private static class FetchDirectTransition extends FetchSuccessTransition {
+ @Override
+ public void transition(LocalizedResource rsrc, ResourceEvent event) {
+ LOG.warn("Resource " + rsrc + " localized without listening container");
+ super.transition(rsrc, event);
+ }
+ }
+
+ /**
+ * Resource localized, notify waiting containers.
+ */
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ private static class FetchSuccessTransition extends ResourceTransition {
+ @Override
+ public void transition(LocalizedResource rsrc, ResourceEvent event) {
+ ResourceLocalizedEvent locEvent = (ResourceLocalizedEvent) event;
+ rsrc.localPath = locEvent.getLocation();
+ rsrc.size = locEvent.getSize();
+ for (ContainerId container : rsrc.ref) {
+ rsrc.dispatcher.getEventHandler().handle(
+ new ContainerResourceLocalizedEvent(
+ container, rsrc.rsrc, rsrc.localPath));
+ }
+ }
+ }
+
+ /**
+ * Resource already localized, notify immediately.
+ */
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ private static class LocalizedResourceTransition
+ extends ResourceTransition {
+ @Override
+ public void transition(LocalizedResource rsrc, ResourceEvent event) {
+ // notify waiting containers
+ ResourceRequestEvent reqEvent = (ResourceRequestEvent) event;
+ ContainerId container = reqEvent.getContext().getContainerId();
+ rsrc.ref.add(container);
+ rsrc.dispatcher.getEventHandler().handle(
+ new ContainerResourceLocalizedEvent(
+ container, rsrc.rsrc, rsrc.localPath));
+ }
+ }
+
+ /**
+ * Decrement resource count, update timestamp.
+ */
+ private static class ReleaseTransition extends ResourceTransition {
+ @Override
+ public void transition(LocalizedResource rsrc, ResourceEvent event) {
+ // Note: assumes that localizing container must succeed or fail
+ ResourceReleaseEvent relEvent = (ResourceReleaseEvent) event;
+ rsrc.release(relEvent.getContainer());
+ }
+ }
+
+ private static class ReleasePendingTransition implements
+ MultipleArcTransition<LocalizedResource,ResourceEvent,ResourceState> {
+ @Override
+ public ResourceState transition(LocalizedResource rsrc,
+ ResourceEvent event) {
+ ResourceReleaseEvent relEvent = (ResourceReleaseEvent) event;
+ rsrc.release(relEvent.getContainer());
+ return rsrc.ref.isEmpty()
+ ? ResourceState.INIT
+ : ResourceState.DOWNLOADING;
+ }
+ }
+}
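
The transition table above reads most clearly as a walk through one lifecycle. The sketch below takes a resource straight from INIT to LOCALIZED via a LOCALIZED event (the FetchDirectTransition arc, which logs a warning because no container asked first). A null dispatcher is tolerable here only because no container is waiting on the resource; the paths are hypothetical and the size is arbitrary.

    package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.api.records.LocalResourceType;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceLocalizedEvent;

    public class LocalizedResourceLifecycleSketch {
      public static void main(String[] args) {
        LocalResourceRequest req = new LocalResourceRequest(
            new Path("hdfs://nn:8020/jobs/job.jar"), 1314000000000L,
            LocalResourceType.FILE);
        // null dispatcher is safe only because ref stays empty in this sketch;
        // a real tracker always supplies one.
        LocalizedResource rsrc = new LocalizedResource(req, null);
        System.out.println(rsrc.getState()); // INIT

        // INIT --LOCALIZED--> LOCALIZED (FetchDirectTransition; warns that no
        // container requested the resource first).
        rsrc.handle(new ResourceLocalizedEvent(
            req, new Path("file:///tmp/nm-local-dir/filecache/10/job.jar"), 1L));
        System.out.println(rsrc.getState());     // LOCALIZED
        System.out.println(rsrc.getLocalPath()); // the local copy
      }
    }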
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizerContext.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizerContext.java
new file mode 100644
index 0000000..25a7d8d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizerContext.java
@@ -0,0 +1,49 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class LocalizerContext {
+
+ private final String user;
+ private final ContainerId containerId;
+ private final Credentials credentials;
+
+ public LocalizerContext(String user, ContainerId containerId,
+ Credentials credentials) {
+ this.user = user;
+ this.containerId = containerId;
+ this.credentials = credentials;
+ }
+
+ public String getUser() {
+ return user;
+ }
+
+ public ContainerId getContainerId() {
+ return containerId;
+ }
+
+ public Credentials getCredentials() {
+ return credentials;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
new file mode 100644
index 0000000..94a2eb6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -0,0 +1,818 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.io.DataOutputStream;
+import java.io.File;
+
+import java.net.URISyntaxException;
+
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+import static org.apache.hadoop.fs.CreateFlag.CREATE;
+import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.DEFAULT_MAX_PUBLIC_FETCH_THREADS;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.DEFAULT_NM_CACHE_CLEANUP_MS;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.DEFAULT_NM_LOCALIZER_BIND_ADDRESS;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.DEFAULT_NM_LOCAL_DIR;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.DEFAULT_NM_LOG_DIR;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.DEFAULT_NM_TARGET_CACHE_MB;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_CACHE_CLEANUP_MS;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_LOCALIZER_BIND_ADDRESS;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_LOCAL_DIR;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_LOG_DIR;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_MAX_PUBLIC_FETCH_THREADS;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_TARGET_CACHE_MB;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationInitedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceLocalizedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRequestEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerSecurityInfo;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenSecretManager;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class ResourceLocalizationService extends AbstractService
+ implements EventHandler<LocalizationEvent>, LocalizationProtocol {
+
+ private static final Log LOG = LogFactory.getLog(ResourceLocalizationService.class);
+ public static final String NM_PRIVATE_DIR = "nmPrivate";
+ public static final FsPermission NM_PRIVATE_PERM = new FsPermission((short) 0700);
+
+ private Server server;
+ private InetSocketAddress localizationServerAddress;
+ private long cacheTargetSize;
+ private long cacheCleanupPeriod;
+ private List<Path> logDirs;
+ private List<Path> localDirs;
+ private List<Path> sysDirs;
+ private final ContainerExecutor exec;
+ protected final Dispatcher dispatcher;
+ private final DeletionService delService;
+ private LocalizerTracker localizerTracker;
+ private RecordFactory recordFactory;
+ private final LocalDirAllocator localDirsSelector;
+ private final ScheduledExecutorService cacheCleanup;
+
+ private final LocalResourcesTracker publicRsrc;
+ private final ConcurrentMap<String,LocalResourcesTracker> privateRsrc =
+ new ConcurrentHashMap<String,LocalResourcesTracker>();
+ private final ConcurrentMap<String,LocalResourcesTracker> appRsrc =
+ new ConcurrentHashMap<String,LocalResourcesTracker>();
+
+ public ResourceLocalizationService(Dispatcher dispatcher,
+ ContainerExecutor exec, DeletionService delService) {
+ super(ResourceLocalizationService.class.getName());
+ this.exec = exec;
+ this.dispatcher = dispatcher;
+ this.delService = delService;
+ this.localDirsSelector = new LocalDirAllocator(NMConfig.NM_LOCAL_DIR);
+ this.publicRsrc = new LocalResourcesTrackerImpl(null, dispatcher);
+ this.cacheCleanup = new ScheduledThreadPoolExecutor(1);
+ }
+
+ FileContext getLocalFileContext(Configuration conf) {
+ try {
+ return FileContext.getLocalFSFileContext(conf);
+ } catch (IOException e) {
+      throw new YarnException("Failed to access local fs", e);
+ }
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
+ try {
+ // TODO queue deletions here, rather than NM init?
+ FileContext lfs = getLocalFileContext(conf);
+ String[] sLocalDirs =
+ conf.getStrings(NM_LOCAL_DIR, DEFAULT_NM_LOCAL_DIR);
+
+ localDirs = new ArrayList<Path>(sLocalDirs.length);
+ logDirs = new ArrayList<Path>(sLocalDirs.length);
+ sysDirs = new ArrayList<Path>(sLocalDirs.length);
+ for (String sLocaldir : sLocalDirs) {
+ Path localdir = new Path(sLocaldir);
+ localDirs.add(localdir);
+ // $local/usercache
+ Path userdir = new Path(localdir, ContainerLocalizer.USERCACHE);
+ lfs.mkdir(userdir, null, true);
+ // $local/filecache
+ Path filedir = new Path(localdir, ContainerLocalizer.FILECACHE);
+ lfs.mkdir(filedir, null, true);
+ // $local/nmPrivate
+ Path sysdir = new Path(localdir, NM_PRIVATE_DIR);
+ lfs.mkdir(sysdir, NM_PRIVATE_PERM, true);
+ sysDirs.add(sysdir);
+ }
+ String[] sLogdirs = conf.getStrings(NM_LOG_DIR, DEFAULT_NM_LOG_DIR);
+ for (String sLogdir : sLogdirs) {
+ Path logdir = new Path(sLogdir);
+ logDirs.add(logdir);
+ lfs.mkdir(logdir, null, true);
+ }
+ } catch (IOException e) {
+ throw new YarnException("Failed to initialize LocalizationService", e);
+ }
+ localDirs = Collections.unmodifiableList(localDirs);
+ logDirs = Collections.unmodifiableList(logDirs);
+ sysDirs = Collections.unmodifiableList(sysDirs);
+ cacheTargetSize =
+ conf.getLong(NM_TARGET_CACHE_MB, DEFAULT_NM_TARGET_CACHE_MB) << 20;
+ cacheCleanupPeriod =
+ conf.getLong(NM_CACHE_CLEANUP_MS, DEFAULT_NM_CACHE_CLEANUP_MS);
+ localizationServerAddress = NetUtils.createSocketAddr(
+ conf.get(NM_LOCALIZER_BIND_ADDRESS, DEFAULT_NM_LOCALIZER_BIND_ADDRESS));
+ localizerTracker = new LocalizerTracker(conf);
+ dispatcher.register(LocalizerEventType.class, localizerTracker);
+ cacheCleanup.scheduleWithFixedDelay(new CacheCleanup(dispatcher),
+ cacheCleanupPeriod, cacheCleanupPeriod, TimeUnit.MILLISECONDS);
+ super.init(conf);
+ }
+
+ @Override
+ public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status) {
+ return localizerTracker.processHeartbeat(status);
+ }
+
+ @Override
+ public void start() {
+ server = createServer();
+ LOG.info("Localizer started on port " + server.getPort());
+ server.start();
+ super.start();
+ }
+
+ Server createServer() {
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ Configuration conf = new Configuration(getConfig()); // Clone to separate
+ // sec-info classes
+ LocalizerTokenSecretManager secretManager = null;
+ if (UserGroupInformation.isSecurityEnabled()) {
+ conf.setClass(YarnConfiguration.YARN_SECURITY_INFO,
+ LocalizerSecurityInfo.class, SecurityInfo.class);
+ secretManager = new LocalizerTokenSecretManager();
+ }
+
+ return rpc.getServer(LocalizationProtocol.class, this,
+ localizationServerAddress, conf, secretManager,
+ conf.getInt(NMConfig.NM_LOCALIZATION_THREADS,
+ NMConfig.DEFAULT_NM_LOCALIZATION_THREADS));
+
+ }
+
+ @Override
+ public void stop() {
+ if (server != null) {
+ server.close();
+ }
+ if (localizerTracker != null) {
+ localizerTracker.stop();
+ }
+ super.stop();
+ }
+
+ @Override
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ public void handle(LocalizationEvent event) {
+ String userName;
+ String appIDStr;
+ // TODO: create log dir as $logdir/$user/$appId
+ switch (event.getType()) {
+ case INIT_APPLICATION_RESOURCES:
+ Application app =
+ ((ApplicationLocalizationEvent)event).getApplication();
+ // 0) Create application tracking structs
+ userName = app.getUser();
+ privateRsrc.putIfAbsent(userName,
+ new LocalResourcesTrackerImpl(userName, dispatcher));
+ if (null != appRsrc.putIfAbsent(ConverterUtils.toString(app.getAppId()),
+ new LocalResourcesTrackerImpl(app.getUser(), dispatcher))) {
+ LOG.warn("Initializing application " + app + " already present");
+ assert false; // TODO: FIXME assert doesn't help
+ // ^ The condition is benign. Tests should fail and it
+ // should appear in logs, but it's an internal error
+ // that should have no effect on applications
+ }
+ // 1) Signal container init
+ dispatcher.getEventHandler().handle(new ApplicationInitedEvent(
+ app.getAppId()));
+ break;
+ case INIT_CONTAINER_RESOURCES:
+ ContainerLocalizationRequestEvent rsrcReqs =
+ (ContainerLocalizationRequestEvent) event;
+ Container c = rsrcReqs.getContainer();
+ LocalizerContext ctxt = new LocalizerContext(
+ c.getUser(), c.getContainerID(), c.getCredentials());
+ final LocalResourcesTracker tracker;
+ LocalResourceVisibility vis = rsrcReqs.getVisibility();
+ switch (vis) {
+ default:
+ case PUBLIC:
+ tracker = publicRsrc;
+ break;
+ case PRIVATE:
+ tracker = privateRsrc.get(c.getUser());
+ break;
+ case APPLICATION:
+ tracker =
+ appRsrc.get(ConverterUtils.toString(c.getContainerID().getAppId()));
+ break;
+ }
+      // We get one event per visibility, so all the resources in this
+      // event share the same visibility.
+ for (LocalResourceRequest req : rsrcReqs.getRequestedResources()) {
+ tracker.handle(new ResourceRequestEvent(req, vis, ctxt));
+ }
+ break;
+ case CACHE_CLEANUP:
+ ResourceRetentionSet retain =
+ new ResourceRetentionSet(delService, cacheTargetSize);
+ retain.addResources(publicRsrc);
+ LOG.debug("Resource cleanup (public) " + retain);
+ for (LocalResourcesTracker t : privateRsrc.values()) {
+ retain.addResources(t);
+ LOG.debug("Resource cleanup " + t.getUser() + ":" + retain);
+ }
+ break;
+ case CLEANUP_CONTAINER_RESOURCES:
+ Container container =
+ ((ContainerLocalizationEvent)event).getContainer();
+
+ // Delete the container directories
+ userName = container.getUser();
+ String containerIDStr = container.toString();
+ appIDStr =
+ ConverterUtils.toString(container.getContainerID().getAppId());
+ for (Path localDir : localDirs) {
+
+ // Delete the user-owned container-dir
+ Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
+ Path userdir = new Path(usersdir, userName);
+ Path allAppsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
+ Path appDir = new Path(allAppsdir, appIDStr);
+ Path containerDir = new Path(appDir, containerIDStr);
+ delService.delete(userName, containerDir, new Path[] {});
+
+ // Delete the nmPrivate container-dir
+ Path sysDir = new Path(localDir, NM_PRIVATE_DIR);
+ Path appSysDir = new Path(sysDir, appIDStr);
+ Path containerSysDir = new Path(appSysDir, containerIDStr);
+ delService.delete(null, containerSysDir, new Path[] {});
+ }
+
+ dispatcher.getEventHandler().handle(new ContainerEvent(
+ container.getContainerID(),
+ ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
+ break;
+ case DESTROY_APPLICATION_RESOURCES:
+
+ Application application =
+ ((ApplicationLocalizationEvent) event).getApplication();
+ LocalResourcesTracker appLocalRsrcsTracker =
+ appRsrc.remove(ConverterUtils.toString(application.getAppId()));
+ if (null == appLocalRsrcsTracker) {
+ LOG.warn("Removing uninitialized application " + application);
+ }
+ // TODO: What to do with appLocalRsrcsTracker?
+
+ // Delete the application directories
+ userName = application.getUser();
+ appIDStr = application.toString();
+ for (Path localDir : localDirs) {
+
+ // Delete the user-owned app-dir
+ Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
+ Path userdir = new Path(usersdir, userName);
+ Path allAppsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
+ Path appDir = new Path(allAppsdir, appIDStr);
+ delService.delete(userName, appDir, new Path[] {});
+
+ // Delete the nmPrivate app-dir
+ Path sysDir = new Path(localDir, NM_PRIVATE_DIR);
+ Path appSysDir = new Path(sysDir, appIDStr);
+ delService.delete(null, appSysDir, new Path[] {});
+ }
+
+ // TODO: decrement reference counts of all resources associated with this
+ // app
+
+ dispatcher.getEventHandler().handle(new ApplicationEvent(
+ application.getAppId(),
+ ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP));
+ break;
+ }
+ }
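+  // Event flow summary (sketch): INIT_APPLICATION_RESOURCES creates the
+  // per-user and per-app trackers; INIT_CONTAINER_RESOURCES routes each
+  // LocalResourceRequest to the tracker matching its visibility;
+  // CACHE_CLEANUP trims the public and private caches toward
+  // cacheTargetSize; CLEANUP_CONTAINER_RESOURCES and
+  // DESTROY_APPLICATION_RESOURCES delete the corresponding usercache and
+  // nmPrivate directories.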
+
+ /**
+ * Sub-component handling the spawning of {@link ContainerLocalizer}s
+ */
+ class LocalizerTracker implements EventHandler<LocalizerEvent> {
+
+ private final PublicLocalizer publicLocalizer;
+ private final Map<String,LocalizerRunner> privLocalizers;
+
+ LocalizerTracker(Configuration conf) {
+ this(conf, new HashMap<String,LocalizerRunner>());
+ }
+
+ LocalizerTracker(Configuration conf,
+ Map<String,LocalizerRunner> privLocalizers) {
+ this.publicLocalizer = new PublicLocalizer(conf);
+ this.privLocalizers = privLocalizers;
+ publicLocalizer.start();
+ }
+
+ public LocalizerHeartbeatResponse processHeartbeat(LocalizerStatus status) {
+ String locId = status.getLocalizerId();
+ synchronized (privLocalizers) {
+ LocalizerRunner localizer = privLocalizers.get(locId);
+ if (null == localizer) {
+ // TODO process resources anyway
+ LOG.info("Unknown localizer with localizerId " + locId
+ + " is sending heartbeat. Ordering it to DIE");
+ LocalizerHeartbeatResponse response =
+ recordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
+ response.setLocalizerAction(LocalizerAction.DIE);
+ return response;
+ }
+ return localizer.update(status.getResources());
+ }
+ }
+
+ public void stop() {
+ for (LocalizerRunner localizer : privLocalizers.values()) {
+ localizer.interrupt();
+ }
+ publicLocalizer.interrupt();
+ }
+
+ @Override
+ public void handle(LocalizerEvent event) {
+ String locId = event.getLocalizerId();
+ switch (event.getType()) {
+ case REQUEST_RESOURCE_LOCALIZATION:
+ // 0) find running localizer or start new thread
+ LocalizerResourceRequestEvent req =
+ (LocalizerResourceRequestEvent)event;
+ switch (req.getVisibility()) {
+ case PUBLIC:
+ publicLocalizer.addResource(req);
+ break;
+ case PRIVATE:
+ case APPLICATION:
+ synchronized (privLocalizers) {
+ LocalizerRunner localizer = privLocalizers.get(locId);
+ if (null == localizer) {
+ LOG.info("Created localizer for " + req.getLocalizerId());
+ localizer = new LocalizerRunner(req.getContext(),
+ req.getLocalizerId());
+ privLocalizers.put(locId, localizer);
+ localizer.start();
+ }
+ // 1) propagate event
+ localizer.addResource(req);
+ }
+ break;
+ }
+ break;
+ case ABORT_LOCALIZATION:
+ // 0) find running localizer, interrupt and remove
+ synchronized (privLocalizers) {
+ LocalizerRunner localizer = privLocalizers.get(locId);
+ if (null == localizer) {
+ return; // ignore; already gone
+ }
+ privLocalizers.remove(locId);
+ localizer.interrupt();
+ }
+ break;
+ }
+ }
+
+ }
+
+ class PublicLocalizer extends Thread {
+
+ static final String PUBCACHE_CTXT = "public.cache.dirs";
+
+ final FileContext lfs;
+ final Configuration conf;
+ final ExecutorService threadPool;
+ final LocalDirAllocator publicDirs;
+ final CompletionService<Path> queue;
+ final Map<Future<Path>,LocalizerResourceRequestEvent> pending;
+ // TODO hack to work around broken signaling
+ final Map<LocalResourceRequest,List<LocalizerResourceRequestEvent>> attempts;
+
+ PublicLocalizer(Configuration conf) {
+ this(conf, getLocalFileContext(conf),
+ Executors.newFixedThreadPool(conf.getInt(
+ NM_MAX_PUBLIC_FETCH_THREADS, DEFAULT_MAX_PUBLIC_FETCH_THREADS)),
+ new HashMap<Future<Path>,LocalizerResourceRequestEvent>(),
+ new HashMap<LocalResourceRequest,List<LocalizerResourceRequestEvent>>());
+ }
+
+ PublicLocalizer(Configuration conf, FileContext lfs,
+ ExecutorService threadPool,
+ Map<Future<Path>,LocalizerResourceRequestEvent> pending,
+ Map<LocalResourceRequest,List<LocalizerResourceRequestEvent>> attempts) {
+ this.lfs = lfs;
+ this.conf = conf;
+ this.pending = pending;
+ this.attempts = attempts;
+ String[] publicFilecache = new String[localDirs.size()];
+ for (int i = 0, n = localDirs.size(); i < n; ++i) {
+ publicFilecache[i] =
+ new Path(localDirs.get(i), ContainerLocalizer.FILECACHE).toString();
+ }
+ conf.setStrings(PUBCACHE_CTXT, publicFilecache);
+ this.publicDirs = new LocalDirAllocator(PUBCACHE_CTXT);
+ this.threadPool = threadPool;
+ this.queue = new ExecutorCompletionService<Path>(threadPool);
+ }
+
+ public void addResource(LocalizerResourceRequestEvent request) {
+ // TODO handle failures, cancellation, requests by other containers
+ LocalResourceRequest key = request.getResource().getRequest();
+ LOG.info("Downloading public rsrc:" + key);
+ synchronized (attempts) {
+ List<LocalizerResourceRequestEvent> sigh = attempts.get(key);
+ if (null == sigh) {
+ pending.put(queue.submit(new FSDownload(
+ lfs, null, conf, publicDirs,
+ request.getResource().getRequest(), new Random())),
+ request);
+ attempts.put(key, new LinkedList<LocalizerResourceRequestEvent>());
+ } else {
+ sigh.add(request);
+ }
+ }
+ }
+
+ @Override
+ public void run() {
+ try {
+ // TODO shutdown, better error handling esp. DU
+ while (!Thread.currentThread().isInterrupted()) {
+ try {
+ Future<Path> completed = queue.take();
+ LocalizerResourceRequestEvent assoc = pending.remove(completed);
+ try {
+ Path local = completed.get();
+ if (null == assoc) {
+ LOG.error("Localized unkonwn resource to " + completed);
+ // TODO delete
+ return;
+ }
+ LocalResourceRequest key = assoc.getResource().getRequest();
+ assoc.getResource().handle(
+ new ResourceLocalizedEvent(key,
+ local, FileUtil.getDU(new File(local.toUri()))));
+ synchronized (attempts) {
+ attempts.remove(key);
+ }
+ } catch (ExecutionException e) {
+ LOG.info("Failed to download rsrc " + assoc.getResource(),
+ e.getCause());
+ dispatcher.getEventHandler().handle(
+ new ContainerResourceFailedEvent(
+ assoc.getContext().getContainerId(),
+ assoc.getResource().getRequest(), e.getCause()));
+ synchronized (attempts) {
+ LocalResourceRequest req = assoc.getResource().getRequest();
+ List<LocalizerResourceRequestEvent> reqs = attempts.get(req);
+ if (null == reqs) {
+ LOG.error("Missing pending list for " + req);
+ return;
+ }
+ if (reqs.isEmpty()) {
+ attempts.remove(req);
+ }
+ /*
+ * Do not retry for now. Once failed is failed!
+ * LocalizerResourceRequestEvent request = reqs.remove(0);
+
+ pending.put(queue.submit(new FSDownload(
+ lfs, null, conf, publicDirs,
+ request.getResource().getRequest(), new Random())),
+ request);
+ */
+ }
+ } catch (CancellationException e) {
+ // ignore; shutting down
+ }
+ } catch (InterruptedException e) {
+ return;
+ }
+ }
+ } catch (Throwable t) {
+ LOG.fatal("Error: Shutting down", t);
+ } finally {
+ LOG.info("Public cache exiting");
+ threadPool.shutdownNow();
+ }
+ }
+
+ }
+
+ /**
+ * Runs the {@link ContainerLocalizer} itself in a separate process with
+ * access to the user's credentials. One {@link LocalizerRunner} per
+ * localizerId.
+ */
+ class LocalizerRunner extends Thread {
+
+ final LocalizerContext context;
+ final String localizerId;
+ final Map<LocalResourceRequest,LocalizerResourceRequestEvent> scheduled;
+ final List<LocalizerResourceRequestEvent> pending;
+
+ // TODO: threadsafe, use outer?
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(getConfig());
+
+ LocalizerRunner(LocalizerContext context, String localizerId) {
+ this.context = context;
+ this.localizerId = localizerId;
+ this.pending = new ArrayList<LocalizerResourceRequestEvent>();
+ this.scheduled =
+ new HashMap<LocalResourceRequest, LocalizerResourceRequestEvent>();
+ }
+
+ public void addResource(LocalizerResourceRequestEvent request) {
+ // TODO: Synchronization
+ pending.add(request);
+ }
+
+ /**
+ * Find the next resource to be given to a spawned localizer.
+ *
+ * @return the next resource to fetch, or null if none is pending and
+ * acquirable
+ */
+ private LocalResource findNextResource() {
+ // TODO: Synchronization
+ for (Iterator<LocalizerResourceRequestEvent> i = pending.iterator();
+ i.hasNext();) {
+ LocalizerResourceRequestEvent evt = i.next();
+ LocalizedResource nRsrc = evt.getResource();
+ if (ResourceState.LOCALIZED.equals(nRsrc.getState())) {
+ i.remove();
+ continue;
+ }
+ if (nRsrc.tryAcquire()) {
+ LocalResourceRequest nextRsrc = nRsrc.getRequest();
+ LocalResource next =
+ recordFactory.newRecordInstance(LocalResource.class);
+ next.setResource(
+ ConverterUtils.getYarnUrlFromPath(nextRsrc.getPath()));
+ next.setTimestamp(nextRsrc.getTimestamp());
+ next.setType(nextRsrc.getType());
+ next.setVisibility(evt.getVisibility());
+ scheduled.put(nextRsrc, evt);
+ return next;
+ }
+ }
+ return null;
+ }
+
+ // TODO this sucks. Fix it later
+ LocalizerHeartbeatResponse update(
+ List<LocalResourceStatus> remoteResourceStatuses) {
+ LocalizerHeartbeatResponse response =
+ recordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
+
+ // The localizer has just spawned. Start giving it resources for
+ // remote-fetching.
+ if (remoteResourceStatuses.isEmpty()) {
+ LocalResource next = findNextResource();
+ if (next != null) {
+ response.setLocalizerAction(LocalizerAction.LIVE);
+ response.addResource(next);
+ } else if (pending.isEmpty()) {
+ // TODO: Synchronization
+ response.setLocalizerAction(LocalizerAction.DIE);
+ } else {
+ response.setLocalizerAction(LocalizerAction.LIVE);
+ }
+ return response;
+ }
+
+ for (LocalResourceStatus stat : remoteResourceStatuses) {
+ LocalResource rsrc = stat.getResource();
+ LocalResourceRequest req = null;
+ try {
+ req = new LocalResourceRequest(rsrc);
+ } catch (URISyntaxException e) {
+ // TODO fail? Already translated several times...
+ }
+ LocalizerResourceRequestEvent assoc = scheduled.get(req);
+ if (assoc == null) {
+ // internal error
+ LOG.error("Unknown resource reported: " + req);
+ continue;
+ }
+ switch (stat.getStatus()) {
+ case FETCH_SUCCESS:
+ // notify resource
+ try {
+ assoc.getResource().handle(
+ new ResourceLocalizedEvent(req,
+ ConverterUtils.getPathFromYarnURL(stat.getLocalPath()),
+ stat.getLocalSize()));
+ } catch (URISyntaxException e) { }
+ if (pending.isEmpty()) {
+ // TODO: Synchronization
+ response.setLocalizerAction(LocalizerAction.DIE);
+ break;
+ }
+ response.setLocalizerAction(LocalizerAction.LIVE);
+ LocalResource next = findNextResource();
+ if (next != null) {
+ response.addResource(next);
+ }
+ break;
+ case FETCH_PENDING:
+ response.setLocalizerAction(LocalizerAction.LIVE);
+ break;
+ case FETCH_FAILURE:
+ LOG.info("DEBUG: FAILED " + req, stat.getException());
+ assoc.getResource().unlock();
+ response.setLocalizerAction(LocalizerAction.DIE);
+ // TODO: Why is this event going directly to the container. Why not
+ // the resource itself? What happens to the resource? Is it removed?
+ dispatcher.getEventHandler().handle(
+ new ContainerResourceFailedEvent(context.getContainerId(),
+ req, stat.getException()));
+ break;
+ default:
+ LOG.info("Unknown status: " + stat.getStatus());
+ response.setLocalizerAction(LocalizerAction.DIE);
+ dispatcher.getEventHandler().handle(
+ new ContainerResourceFailedEvent(context.getContainerId(),
+ req, stat.getException()));
+ break;
+ }
+ }
+ return response;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked") // dispatcher not typed
+ public void run() {
+ try {
+ // Use LocalDirAllocator to get nmPrivateDir
+ Path nmPrivateCTokensPath =
+ localDirsSelector.getLocalPathForWrite(
+ NM_PRIVATE_DIR
+ + Path.SEPARATOR
+ + String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT,
+ localizerId), getConfig());
+ // 0) init queue, etc.
+ // 1) write credentials to private dir
+ DataOutputStream tokenOut = null;
+ try {
+ Credentials credentials = context.getCredentials();
+ FileContext lfs = getLocalFileContext(getConfig());
+ tokenOut =
+ lfs.create(nmPrivateCTokensPath, EnumSet.of(CREATE, OVERWRITE));
+ LOG.info("Writing credentials to the nmPrivate file "
+ + nmPrivateCTokensPath.toString() + ". Credentials list: ");
+ if (LOG.isDebugEnabled()) {
+ for (Token<? extends TokenIdentifier> tk : credentials
+ .getAllTokens()) {
+ LOG.debug(tk.getService() + " : " + tk.encodeToUrlString());
+ }
+ }
+ credentials.writeTokenStorageToStream(tokenOut);
+ } finally {
+ if (tokenOut != null) {
+ tokenOut.close();
+ }
+ }
+ // 2) exec initApplication and wait
+ exec.startLocalizer(nmPrivateCTokensPath, localizationServerAddress,
+ context.getUser(),
+ ConverterUtils.toString(context.getContainerId().getAppId()),
+ localizerId, localDirs);
+ // TODO handle ExitCodeException separately?
+ } catch (Exception e) {
+ LOG.info("Localizer failed", e);
+ // 3) on error, report failure to Container and signal ABORT
+ // 3.1) notify resource of failed localization
+ ContainerId cId = context.getContainerId();
+ dispatcher.getEventHandler().handle(
+ new ContainerResourceFailedEvent(cId, null, e));
+ } finally {
+ for (LocalizerResourceRequestEvent event : scheduled.values()) {
+ event.getResource().unlock();
+ }
+ }
+ }
+
+ }
+
+ static class CacheCleanup extends Thread {
+
+ private final Dispatcher dispatcher;
+
+ public CacheCleanup(Dispatcher dispatcher) {
+ this.dispatcher = dispatcher;
+ }
+
+ @Override
+ public void run() {
+ dispatcher.getEventHandler().handle(
+ new LocalizationEvent(LocalizationEventType.CACHE_CLEANUP));
+ }
+
+ }
+
+}
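For illustration, the heartbeat handling in LocalizerTracker.processHeartbeat above
boils down to a lookup in the private-localizer map: an unknown localizerId is
ordered to DIE, while a known one is handed to its LocalizerRunner, whose update()
keeps it LIVE and feeds it the next pending resource. A minimal, self-contained
sketch of that decision (plain Java; HeartbeatAction and runners are illustrative
stand-ins, not part of this patch):

    import java.util.HashMap;
    import java.util.Map;

    class LocalizerHeartbeatSketch {
      enum HeartbeatAction { LIVE, DIE }

      // Stand-in for the privLocalizers map, guarded by the same lock.
      private final Map<String, Object> runners = new HashMap<String, Object>();

      HeartbeatAction processHeartbeat(String localizerId) {
        synchronized (runners) {
          // Unknown localizers are told to exit; known ones keep running
          // and are given resources by their runner's update() method.
          return runners.containsKey(localizerId)
              ? HeartbeatAction.LIVE : HeartbeatAction.DIE;
        }
      }
    }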
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceRetentionSet.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceRetentionSet.java
new file mode 100644
index 0000000..f39b537
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceRetentionSet.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+
+public class ResourceRetentionSet {
+
+ private long delSize;
+ private long currentSize;
+ private final long targetSize;
+ private final DeletionService delService;
+ private final SortedMap<LocalizedResource,LocalResourcesTracker> retain;
+
+ ResourceRetentionSet(DeletionService delService, long targetSize) {
+ this(delService, targetSize, new LRUComparator());
+ }
+
+ ResourceRetentionSet(DeletionService delService, long targetSize,
+ Comparator<? super LocalizedResource> cmp) {
+ this(delService, targetSize,
+ new TreeMap<LocalizedResource,LocalResourcesTracker>(cmp));
+ }
+
+ ResourceRetentionSet(DeletionService delService, long targetSize,
+ SortedMap<LocalizedResource,LocalResourcesTracker> retain) {
+ this.retain = retain;
+ this.delService = delService;
+ this.targetSize = targetSize;
+ }
+
+ public void addResources(LocalResourcesTracker newTracker) {
+ for (LocalizedResource resource : newTracker) {
+ currentSize += resource.getSize();
+ if (resource.getRefCount() > 0) {
+ // always retain resources in use
+ continue;
+ }
+ retain.put(resource, newTracker);
+ }
+ for (Iterator<Map.Entry<LocalizedResource,LocalResourcesTracker>> i =
+ retain.entrySet().iterator();
+ currentSize - delSize > targetSize && i.hasNext();) {
+ Map.Entry<LocalizedResource,LocalResourcesTracker> rsrc = i.next();
+ LocalizedResource resource = rsrc.getKey();
+ LocalResourcesTracker tracker = rsrc.getValue();
+ if (tracker.remove(resource, delService)) {
+ delSize += resource.getSize();
+ i.remove();
+ }
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Cache: ").append(currentSize).append(", ");
+ sb.append("Deleted: ").append(delSize);
+ return sb.toString();
+ }
+
+ static class LRUComparator implements Comparator<LocalizedResource> {
+ @Override
+ public int compare(LocalizedResource r1, LocalizedResource r2) {
+ long ret = r1.getTimestamp() - r2.getTimestamp();
+ if (0 == ret) {
+ return System.identityHashCode(r1) - System.identityHashCode(r2);
+ }
+ return ret > 0 ? 1 : -1;
+ }
+ @Override
+ public boolean equals(Object other) {
+ return this == other;
+ }
+ }
+}
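A hedged usage sketch for the class above: during a CACHE_CLEANUP pass the service
could feed each LocalResourcesTracker into a ResourceRetentionSet sized to the target
cache size, letting the LRU comparator pick victims. The delService handle, the
targetCacheSize value, and the tracker variables below are assumptions about the
surrounding service, not APIs defined in this patch:

    // Sketch: evict least-recently-used resources until the cache fits.
    ResourceRetentionSet retention =
        new ResourceRetentionSet(delService, targetCacheSize);
    retention.addResources(publicRsrcTracker);            // shared cache first
    for (LocalResourcesTracker tracker : privateRsrcTrackers.values()) {
      retention.addResources(tracker);                    // then per-user caches
    }
    // toString() reports "Cache: <bytes>, Deleted: <bytes>"
    LOG.info("Cleanup pass done. " + retention);

In-use resources (refCount > 0) are never put into the retain map, so a pass can
only delete resources that no container currently references.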
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceState.java
new file mode 100644
index 0000000..751f60e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceState.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+enum ResourceState {
+ INIT,
+ DOWNLOADING,
+ LOCALIZED
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ApplicationLocalizationEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ApplicationLocalizationEvent.java
new file mode 100644
index 0000000..2f9ddcc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ApplicationLocalizationEvent.java
@@ -0,0 +1,36 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+
+public class ApplicationLocalizationEvent extends LocalizationEvent {
+
+ final Application app;
+
+ public ApplicationLocalizationEvent(LocalizationEventType type, Application app) {
+ super(type);
+ this.app = app;
+ }
+
+ public Application getApplication() {
+ return app;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationEvent.java
new file mode 100644
index 0000000..1afc89e7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationEvent.java
@@ -0,0 +1,35 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+
+public class ContainerLocalizationEvent extends LocalizationEvent {
+
+ final Container container;
+
+ public ContainerLocalizationEvent(LocalizationEventType event, Container c) {
+ super(event);
+ this.container = c;
+ }
+
+ public Container getContainer() {
+ return container;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationRequestEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationRequestEvent.java
new file mode 100644
index 0000000..eeb7354
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationRequestEvent.java
@@ -0,0 +1,52 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import java.util.Collection;
+
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+
+public class ContainerLocalizationRequestEvent extends
+ ContainerLocalizationEvent {
+
+ private final LocalResourceVisibility vis;
+ private final Collection<LocalResourceRequest> reqs;
+
+ /**
+ * Event requesting the localization of all resources in reqs, each with
+ * visibility vis
+ * @param c
+ * @param reqs
+ * @param vis
+ */
+ public ContainerLocalizationRequestEvent(Container c,
+ Collection<LocalResourceRequest> reqs, LocalResourceVisibility vis) {
+ super(LocalizationEventType.INIT_CONTAINER_RESOURCES, c);
+ this.vis = vis;
+ this.reqs = reqs;
+ }
+
+ public LocalResourceVisibility getVisibility() {
+ return vis;
+ }
+
+ public Collection<LocalResourceRequest> getRequestedResources() {
+ return reqs;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizationEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizationEvent.java
new file mode 100644
index 0000000..59ed0bb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizationEvent.java
@@ -0,0 +1,29 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class LocalizationEvent extends AbstractEvent<LocalizationEventType> {
+
+ public LocalizationEvent(LocalizationEventType event) {
+ super(event);
+ }
+
+}
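All of the event classes in this package follow the same typed-event pattern: an
enum names the event types, an AbstractEvent<EnumType> subclass carries the payload,
and an EventHandler<EventClass> switches on getType(), as LocalizerTracker does
above. A minimal self-contained model of the pattern (plain Java; AbstractEvent and
EventHandler here are simplified stand-ins for the org.apache.hadoop.yarn.event
versions, and PingEvent is purely illustrative):

    class AbstractEvent<T extends Enum<T>> {
      private final T type;
      AbstractEvent(T type) { this.type = type; }
      T getType() { return type; }
    }

    interface EventHandler<E extends AbstractEvent<?>> {
      void handle(E event);
    }

    enum PingEventType { PING, STOP }

    class PingEvent extends AbstractEvent<PingEventType> {
      PingEvent(PingEventType type) { super(type); }
    }

    class PingHandler implements EventHandler<PingEvent> {
      public void handle(PingEvent event) {
        switch (event.getType()) {   // dispatch on the enum, as the
        case PING:                   // localization handlers above do
          System.out.println("ping");
          break;
        case STOP:
          System.out.println("stop");
          break;
        }
      }
    }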
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizationEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizationEventType.java
new file mode 100644
index 0000000..5134349
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizationEventType.java
@@ -0,0 +1,26 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+public enum LocalizationEventType {
+ INIT_APPLICATION_RESOURCES,
+ INIT_CONTAINER_RESOURCES,
+ CACHE_CLEANUP,
+ CLEANUP_CONTAINER_RESOURCES,
+ DESTROY_APPLICATION_RESOURCES,
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerEvent.java
new file mode 100644
index 0000000..d3cb4af
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerEvent.java
@@ -0,0 +1,35 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class LocalizerEvent extends AbstractEvent<LocalizerEventType> {
+
+ private final String localizerId;
+
+ public LocalizerEvent(LocalizerEventType type, String localizerId) {
+ super(type);
+ this.localizerId = localizerId;
+ }
+
+ public String getLocalizerId() {
+ return localizerId;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerEventType.java
new file mode 100644
index 0000000..09a1ae0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerEventType.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+public enum LocalizerEventType {
+ REQUEST_RESOURCE_LOCALIZATION,
+ ABORT_LOCALIZATION
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerResourceRequestEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerResourceRequestEvent.java
new file mode 100644
index 0000000..f8a2c899
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerResourceRequestEvent.java
@@ -0,0 +1,52 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizerContext;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class LocalizerResourceRequestEvent extends LocalizerEvent {
+
+ private final LocalizerContext context;
+ private final LocalizedResource resource;
+ private final LocalResourceVisibility vis;
+
+ public LocalizerResourceRequestEvent(LocalizedResource resource,
+ LocalResourceVisibility vis, LocalizerContext context) {
+ super(LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION,
+ ConverterUtils.toString(context.getContainerId()));
+ this.vis = vis;
+ this.context = context;
+ this.resource = resource;
+ }
+
+ public LocalizedResource getResource() {
+ return resource;
+ }
+
+ public LocalizerContext getContext() {
+ return context;
+ }
+
+ public LocalResourceVisibility getVisibility() {
+ return vis;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceEvent.java
new file mode 100644
index 0000000..079534ef
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceEvent.java
@@ -0,0 +1,36 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+
+public class ResourceEvent extends AbstractEvent<ResourceEventType> {
+
+ private final LocalResourceRequest rsrc;
+
+ public ResourceEvent(LocalResourceRequest rsrc, ResourceEventType type) {
+ super(type);
+ this.rsrc = rsrc;
+ }
+
+ public LocalResourceRequest getLocalResourceRequest() {
+ return rsrc;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceEventType.java
new file mode 100644
index 0000000..8e84ac6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceEventType.java
@@ -0,0 +1,24 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+public enum ResourceEventType {
+ REQUEST,
+ LOCALIZED,
+ RELEASE
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceLocalizedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceLocalizedEvent.java
new file mode 100644
index 0000000..a1a388d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceLocalizedEvent.java
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+
+public class ResourceLocalizedEvent extends ResourceEvent {
+
+ private final long size;
+ private final Path location;
+
+ public ResourceLocalizedEvent(LocalResourceRequest rsrc, Path location,
+ long size) {
+ super(rsrc, ResourceEventType.LOCALIZED);
+ this.size = size;
+ this.location = location;
+ }
+
+ public Path getLocation() {
+ return location;
+ }
+
+ public long getSize() {
+ return size;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceReleaseEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceReleaseEvent.java
new file mode 100644
index 0000000..4ab7982
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceReleaseEvent.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+
+public class ResourceReleaseEvent extends ResourceEvent {
+
+ private final ContainerId container;
+
+ public ResourceReleaseEvent(LocalResourceRequest rsrc, ContainerId container)
+ throws URISyntaxException {
+ super(rsrc, ResourceEventType.RELEASE);
+ this.container = container;
+ }
+
+ public ContainerId getContainer() {
+ return container;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceRequestEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceRequestEvent.java
new file mode 100644
index 0000000..af21842
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceRequestEvent.java
@@ -0,0 +1,44 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
+
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizerContext;
+
+public class ResourceRequestEvent extends ResourceEvent {
+
+ private final LocalizerContext context;
+ private final LocalResourceVisibility vis;
+
+ public ResourceRequestEvent(LocalResourceRequest resource,
+ LocalResourceVisibility vis, LocalizerContext context) {
+ super(resource, ResourceEventType.REQUEST);
+ this.vis = vis;
+ this.context = context;
+ }
+
+ public LocalizerContext getContext() {
+ return context;
+ }
+
+ public LocalResourceVisibility getVisibility() {
+ return vis;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java
new file mode 100644
index 0000000..050b992
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java
@@ -0,0 +1,59 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.yarn.proto.LocalizationProtocol;
+
+public class LocalizerSecurityInfo extends SecurityInfo {
+
+ @Override
+ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+ return null;
+ }
+
+ @Override
+ public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+ if (!protocol
+ .equals(LocalizationProtocol.LocalizationProtocolService.BlockingInterface.class)) {
+ return null;
+ }
+ return new TokenInfo() {
+
+ @Override
+ public Class<? extends Annotation> annotationType() {
+ return null;
+ }
+
+ @Override
+ public Class<? extends TokenSelector<? extends TokenIdentifier>>
+ value() {
+ System.err.print("=========== Using localizerTokenSecurityInfo");
+ return LocalizerTokenSelector.class;
+ }
+ };
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenIdentifier.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenIdentifier.java
new file mode 100644
index 0000000..6e42d4e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenIdentifier.java
@@ -0,0 +1,57 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.TokenIdentifier;
+
+public class LocalizerTokenIdentifier extends TokenIdentifier {
+
+ public static final Text KIND = new Text("Localizer");
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ // Placeholder serialization; the identifier carries no real fields yet.
+ out.writeInt(1);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ // Placeholder deserialization matching write() above.
+ in.readInt();
+ }
+
+ @Override
+ public Text getKind() {
+ return KIND;
+ }
+
+ @Override
+ public UserGroupInformation getUser() {
+ // Placeholder identity until real localizer tokens are wired up.
+ return UserGroupInformation.createRemoteUser("testing");
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSecretManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSecretManager.java
new file mode 100644
index 0000000..3f77059
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSecretManager.java
@@ -0,0 +1,44 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security;
+
+import org.apache.hadoop.security.token.SecretManager;
+
+public class LocalizerTokenSecretManager extends
+ SecretManager<LocalizerTokenIdentifier> {
+
+ @Override
+ protected byte[] createPassword(LocalizerTokenIdentifier identifier) {
+ // Placeholder secret until real localizer secrets are wired up.
+ return "testing".getBytes();
+ }
+
+ @Override
+ public byte[] retrievePassword(LocalizerTokenIdentifier identifier)
+ throws org.apache.hadoop.security.token.SecretManager.InvalidToken {
+ // Placeholder secret matching createPassword().
+ return "testing".getBytes();
+ }
+
+ @Override
+ public LocalizerTokenIdentifier createIdentifier() {
+ return new LocalizerTokenIdentifier();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
new file mode 100644
index 0000000..5ede0e6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
@@ -0,0 +1,50 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security;
+
+import java.util.Collection;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+
+public class LocalizerTokenSelector implements
+ TokenSelector<LocalizerTokenIdentifier> {
+
+ @Override
+ public Token<LocalizerTokenIdentifier> selectToken(Text service,
+ Collection<Token<? extends TokenIdentifier>> tokens) {
+ System.err.print("=========== Using localizerTokenSelector");
+// if (service == null) {
+// return null;
+// }
+ for (Token<? extends TokenIdentifier> token : tokens) {
+ System.err.print("============ token of kind " + token.getKind() + " is found");
+ if (LocalizerTokenIdentifier.KIND.equals(token.getKind())
+ //&& service.equals(token.getService())
+ ) {
+ return (Token<LocalizerTokenIdentifier>) token;
+ }
+ }
+ System.err.print("returning null ========== ");
+ return null;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AggregatedLogFormat.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AggregatedLogFormat.java
new file mode 100644
index 0000000..251b391
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AggregatedLogFormat.java
@@ -0,0 +1,262 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
+
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.EnumSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.file.tfile.TFile;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class AggregatedLogFormat {
+
+ static final Log LOG = LogFactory.getLog(AggregatedLogFormat.class);
+
+ public static class LogKey implements Writable {
+
+ private String containerId;
+
+ public LogKey() {
+ // Default constructor needed for Writable deserialization.
+ }
+
+ public LogKey(ContainerId containerId) {
+ this.containerId = ConverterUtils.toString(containerId);
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ out.writeUTF(this.containerId);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ this.containerId = in.readUTF();
+ }
+
+ @Override
+ public String toString() {
+ return this.containerId;
+ }
+ }
+
+ public static class LogValue {
+
+ private final String[] rootLogDirs;
+ private final ContainerId containerId;
+
+ public LogValue(String[] rootLogDirs, ContainerId containerId) {
+ this.rootLogDirs = rootLogDirs;
+ this.containerId = containerId;
+ }
+
+ public void write(DataOutputStream out) throws IOException {
+ for (String rootLogDir : this.rootLogDirs) {
+ File appLogDir =
+ new File(rootLogDir, ConverterUtils.toString(this.containerId
+ .getAppId()));
+ File containerLogDir =
+ new File(appLogDir, ConverterUtils.toString(this.containerId));
+
+ if (!containerLogDir.isDirectory()) {
+ continue; // ContainerDir may have been deleted by the user.
+ }
+
+ for (File logFile : containerLogDir.listFiles()) {
+
+ // Write the logFile Type
+ out.writeUTF(logFile.getName());
+
+ // Write the log length as UTF so that it is printable
+ out.writeUTF(String.valueOf(logFile.length()));
+
+ // Write the log itself
+ FileInputStream in = null;
+ try {
+ in = new FileInputStream(logFile);
+ byte[] buf = new byte[65535];
+ int len = 0;
+ while ((len = in.read(buf)) != -1) {
+ out.write(buf, 0, len);
+ }
+ } finally {
+ if (in != null) {
+ in.close();
+ }
+ }
+ }
+ }
+ }
+ }
+
+ public static class LogWriter {
+
+ private final FSDataOutputStream fsDataOStream;
+ private final TFile.Writer writer;
+
+ public LogWriter(final Configuration conf, final Path remoteAppLogFile,
+ UserGroupInformation userUgi) throws IOException {
+ try {
+ this.fsDataOStream =
+ userUgi.doAs(new PrivilegedExceptionAction<FSDataOutputStream>() {
+ @Override
+ public FSDataOutputStream run() throws Exception {
+ return FileContext.getFileContext(conf).create(
+ remoteAppLogFile,
+ EnumSet.of(CreateFlag.CREATE), new Options.CreateOpts[] {});
+ }
+ });
+ } catch (InterruptedException e) {
+ throw new IOException(e);
+ }
+
+ // Keys are not sorted: null comparator arg.
+ // 256KB minBlockSize: the expected log size for each container.
+ this.writer =
+ new TFile.Writer(this.fsDataOStream, 256 * 1024, conf.get(
+ LogAggregationService.LOG_COMPRESSION_TYPE,
+ LogAggregationService.DEFAULT_COMPRESSION_TYPE), null, conf);
+ }
+
+ public void append(LogKey logKey, LogValue logValue) throws IOException {
+ DataOutputStream out = this.writer.prepareAppendKey(-1);
+ logKey.write(out);
+ out.close();
+ out = this.writer.prepareAppendValue(-1);
+ logValue.write(out);
+ out.close();
+ this.fsDataOStream.hflush();
+ }
+
+ public void closeWriter() {
+ try {
+ this.writer.close();
+ } catch (IOException e) {
+ LOG.warn("Exception closing writer", e);
+ }
+ try {
+ this.fsDataOStream.close();
+ } catch (IOException e) {
+ LOG.warn("Exception closing output-stream", e);
+ }
+ }
+ }
+
+ public static class LogReader {
+
+ private final FSDataInputStream fsDataIStream;
+ private final TFile.Reader.Scanner scanner;
+
+ public LogReader(Configuration conf, Path remoteAppLogFile)
+ throws IOException {
+ FileContext fileContext = FileContext.getFileContext(conf);
+ this.fsDataIStream = fileContext.open(remoteAppLogFile);
+ TFile.Reader reader =
+ new TFile.Reader(this.fsDataIStream, fileContext.getFileStatus(
+ remoteAppLogFile).getLen(), conf);
+ this.scanner = reader.createScanner();
+ }
+
+ private boolean atBeginning = true;
+
+ /**
+ * Read the next key and return the value-stream.
+ *
+ * @param key the key object to populate with the next container-id
+ * @return the valueStream if there are more keys, or null otherwise.
+ * @throws IOException
+ */
+ public DataInputStream next(LogKey key) throws IOException {
+ if (!this.atBeginning) {
+ this.scanner.advance();
+ } else {
+ this.atBeginning = false;
+ }
+ if (this.scanner.atEnd()) {
+ return null;
+ }
+ TFile.Reader.Scanner.Entry entry = this.scanner.entry();
+ key.readFields(entry.getKeyStream());
+ DataInputStream valueStream = entry.getValueStream();
+ return valueStream;
+ }
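+
+ // Illustrative sketch (not part of this patch; the path below is
+ // hypothetical): iterating over all containers in an aggregated log file.
+ //
+ // LogReader reader =
+ // new LogReader(conf, new Path("/remote-logs/appX/nodeY"));
+ // LogKey key = new LogKey();
+ // DataInputStream valueStream = reader.next(key);
+ // while (valueStream != null) {
+ // // key.toString() is the container-id; valueStream holds its logs
+ // key = new LogKey();
+ // valueStream = reader.next(key);
+ // }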
+
+ /**
+ * Keep calling this till you get an {@link EOFException}: each call
+ * writes out the logs of one log-type for a single container.
+ *
+ * @param valueStream the value-stream positioned at a container's logs
+ * @param out the stream to write the decoded logs to
+ * @throws IOException
+ */
+ public static void readAContainerLogsForALogType(
+ DataInputStream valueStream, DataOutputStream out)
+ throws IOException {
+
+ byte[] buf = new byte[65535];
+
+ String fileType = valueStream.readUTF();
+ String fileLengthStr = valueStream.readUTF();
+ long fileLength = Long.parseLong(fileLengthStr);
+ // Write the headers as raw bytes; writeUTF would prepend binary
+ // length prefixes and garble the dumped output.
+ out.write(("\nLogType:" + fileType).getBytes());
+ out.write(("\nLogLength:" + fileLengthStr).getBytes());
+ out.write("\nLog Contents:\n".getBytes());
+
+ long curRead = 0; // long, so very large logs do not overflow
+ long pendingRead = fileLength - curRead;
+ int toRead =
+ pendingRead > buf.length ? buf.length : (int) pendingRead;
+ int len = valueStream.read(buf, 0, toRead);
+ while (len != -1 && curRead < fileLength) {
+ out.write(buf, 0, len);
+ curRead += len;
+
+ pendingRead = fileLength - curRead;
+ toRead =
+ pendingRead > buf.length ? buf.length : (int) pendingRead;
+ len = valueStream.read(buf, 0, toRead);
+ }
+ }
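+
+ // Illustrative sketch: draining every log-type of one container. An
+ // EOFException from the value-stream is the expected terminator.
+ //
+ // while (true) {
+ // try {
+ // LogReader.readAContainerLogsForALogType(valueStream, out);
+ // } catch (EOFException eof) {
+ // break; // no more log-types for this container
+ // }
+ // }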
+
+ public void close() throws IOException {
+ this.scanner.close();
+ this.fsDataIStream.close();
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
new file mode 100644
index 0000000..71d5d9b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
@@ -0,0 +1,32 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public interface AppLogAggregator extends Runnable {
+
+ void startContainerLogAggregation(ContainerId containerId,
+ boolean wasContainerSuccessful);
+
+ void finishLogAggregation();
+
+ void join();
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
new file mode 100644
index 0000000..e3c0335
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -0,0 +1,211 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat.LogKey;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat.LogValue;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat.LogWriter;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class AppLogAggregatorImpl implements AppLogAggregator {
+
+ private static final Log LOG = LogFactory
+ .getLog(AppLogAggregatorImpl.class);
+ private static final int THREAD_SLEEP_TIME = 1000;
+
+ private final String applicationId;
+ private boolean logAggregationDisabled = false;
+ private final Configuration conf;
+ private final DeletionService delService;
+ private final UserGroupInformation userUgi;
+ private final String[] rootLogDirs;
+ private final Path remoteNodeLogFileForApp;
+ private final ContainerLogsRetentionPolicy retentionPolicy;
+
+ private final BlockingQueue<ContainerId> pendingContainers;
+ private final AtomicBoolean appFinishing = new AtomicBoolean();
+ private final AtomicBoolean appAggregationFinished = new AtomicBoolean();
+
+ private LogWriter writer = null;
+
+ public AppLogAggregatorImpl(DeletionService deletionService,
+ Configuration conf, ApplicationId appId, UserGroupInformation userUgi,
+ String[] localRootLogDirs, Path remoteNodeLogFileForApp,
+ ContainerLogsRetentionPolicy retentionPolicy) {
+ this.conf = conf;
+ this.delService = deletionService;
+ this.applicationId = ConverterUtils.toString(appId);
+ this.userUgi = userUgi;
+ this.rootLogDirs = localRootLogDirs;
+ this.remoteNodeLogFileForApp = remoteNodeLogFileForApp;
+ this.retentionPolicy = retentionPolicy;
+ this.pendingContainers = new LinkedBlockingQueue<ContainerId>();
+ }
+
+ private void uploadLogsForContainer(ContainerId containerId) {
+
+ if (this.logAggregationDisabled) {
+ return;
+ }
+
+ // Lazy creation of the writer
+ if (this.writer == null) {
+ LOG.info("Starting aggregate log-file for app " + this.applicationId);
+ try {
+ this.writer =
+ new LogWriter(this.conf, this.remoteNodeLogFileForApp,
+ this.userUgi);
+ } catch (IOException e) {
+ LOG.error("Cannot create writer for app " + this.applicationId
+ + ". Disabling log-aggregation for this app.", e);
+ this.logAggregationDisabled = true;
+ return;
+ }
+ }
+
+ LOG.info("Uploading logs for container " + containerId);
+ LogKey logKey = new LogKey(containerId);
+ LogValue logValue = new LogValue(this.rootLogDirs, containerId);
+ try {
+ this.writer.append(logKey, logValue);
+ } catch (IOException e) {
+ LOG.error("Couldn't upload logs for " + containerId
+ + ". Skipping this container.");
+ }
+ }
+
+ @Override
+ public void run() {
+
+ ContainerId containerId;
+
+ while (!this.appFinishing.get()) {
+ try {
+ containerId = this.pendingContainers.poll();
+ if (containerId == null) {
+ Thread.sleep(THREAD_SLEEP_TIME);
+ } else {
+ uploadLogsForContainer(containerId);
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("PendingContainers queue is interrupted");
+ }
+ }
+
+ // Application is finished. Finish pending-containers
+ while ((containerId = this.pendingContainers.poll()) != null) {
+ uploadLogsForContainer(containerId);
+ }
+
+ // Remove the local app-log-dirs
+ Path[] localAppLogDirs = new Path[this.rootLogDirs.length];
+ int index = 0;
+ for (String rootLogDir : this.rootLogDirs) {
+ localAppLogDirs[index] = new Path(rootLogDir, this.applicationId);
+ index++;
+ }
+ this.delService.delete(this.userUgi.getShortUserName(), null,
+ localAppLogDirs);
+
+ if (this.writer != null) {
+ this.writer.closeWriter();
+ LOG.info("Finished aggregate log-file for app " + this.applicationId);
+ }
+
+ this.appAggregationFinished.set(true);
+ }
+
+ private boolean shouldUploadLogs(ContainerId containerId,
+ boolean wasContainerSuccessful) {
+
+ // All containers
+ if (this.retentionPolicy
+ .equals(ContainerLogsRetentionPolicy.ALL_CONTAINERS)) {
+ return true;
+ }
+
+ // AM Container only
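+ // (the container with id 1 is assumed to be the ApplicationMaster's)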
+ if (this.retentionPolicy
+ .equals(ContainerLogsRetentionPolicy.APPLICATION_MASTER_ONLY)) {
+ if (containerId.getId() == 1) {
+ return true;
+ }
+ return false;
+ }
+
+ // AM + Failing containers
+ if (this.retentionPolicy
+ .equals(ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY)) {
+ if (containerId.getId() == 1) {
+ return true;
+ } else if(!wasContainerSuccessful) {
+ return true;
+ }
+ return false;
+ }
+ return false;
+ }
+
+ @Override
+ public void startContainerLogAggregation(ContainerId containerId,
+ boolean wasContainerSuccessful) {
+ if (shouldUploadLogs(containerId, wasContainerSuccessful)) {
+ LOG.info("Considering container " + containerId
+ + " for log-aggregation");
+ this.pendingContainers.add(containerId);
+ }
+ }
+
+ @Override
+ public void finishLogAggregation() {
+ LOG.info("Application just finished : " + this.applicationId);
+ this.appFinishing.set(true);
+ }
+
+ @Override
+ public void join() {
+ // Aggregation service is finishing
+ this.finishLogAggregation();
+
+ while (!this.appAggregationFinished.get()) {
+ LOG.info("Waiting for aggregation to complete for "
+ + this.applicationId);
+ try {
+ Thread.sleep(THREAD_SLEEP_TIME);
+ } catch (InterruptedException e) {
+ LOG.warn("Join interrupted. Some logs may not have been aggregated!!");
+ break;
+ }
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/ContainerLogsRetentionPolicy.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/ContainerLogsRetentionPolicy.java
new file mode 100644
index 0000000..dd2cf84
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/ContainerLogsRetentionPolicy.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
+
+public enum ContainerLogsRetentionPolicy {
+ APPLICATION_MASTER_ONLY, AM_AND_FAILED_CONTAINERS_ONLY, ALL_CONTAINERS
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
new file mode 100644
index 0000000..c5eadfd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -0,0 +1,233 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
+
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.DEFAULT_NM_BIND_ADDRESS;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_BIND_ADDRESS;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorAppFinishedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorAppStartedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorContainerFinishedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorEvent;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class LogAggregationService extends AbstractService implements
+ EventHandler<LogAggregatorEvent> {
+
+ private static final Log LOG = LogFactory
+ .getLog(LogAggregationService.class);
+
+ private final DeletionService deletionService;
+
+ private String[] localRootLogDirs;
+ Path remoteRootLogDir;
+ private String nodeFile;
+
+ static final String LOG_COMPRESSION_TYPE = NMConfig.NM_PREFIX
+ + "logaggregation.log_compression_type";
+ static final String DEFAULT_COMPRESSION_TYPE = "none";
+
+ private static final String LOG_RETENTION_POLICY_CONFIG_KEY =
+ NMConfig.NM_PREFIX + "logaggregation.retain-policy";
+
+ private final ConcurrentMap<ApplicationId, AppLogAggregator> appLogAggregators;
+
+ private final ExecutorService threadPool;
+
+ public LogAggregationService(DeletionService deletionService) {
+ super(LogAggregationService.class.getName());
+ this.deletionService = deletionService;
+ this.appLogAggregators =
+ new ConcurrentHashMap<ApplicationId, AppLogAggregator>();
+ this.threadPool = Executors.newCachedThreadPool();
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+ this.localRootLogDirs =
+ conf.getStrings(NMConfig.NM_LOG_DIR, NMConfig.DEFAULT_NM_LOG_DIR);
+ this.remoteRootLogDir =
+ new Path(conf.get(NMConfig.REMOTE_USER_LOG_DIR,
+ NMConfig.DEFAULT_REMOTE_APP_LOG_DIR));
+ super.init(conf);
+ }
+
+ @Override
+ public synchronized void start() {
+ String address =
+ getConfig().get(NM_BIND_ADDRESS, DEFAULT_NM_BIND_ADDRESS);
+ InetSocketAddress cmBindAddress = NetUtils.createSocketAddr(address);
+ try {
+ this.nodeFile =
+ InetAddress.getLocalHost().getHostAddress() + "_"
+ + cmBindAddress.getPort();
+ } catch (UnknownHostException e) {
+ throw new YarnException(e);
+ }
+ super.start();
+ }
+
+ Path getRemoteNodeLogFileForApp(ApplicationId appId) {
+ return getRemoteNodeLogFileForApp(this.remoteRootLogDir, appId,
+ this.nodeFile);
+ }
+
+ static Path getRemoteNodeLogFileForApp(Path remoteRootLogDir,
+ ApplicationId appId, String nodeFile) {
+ return new Path(getRemoteAppLogDir(remoteRootLogDir, appId),
+ nodeFile);
+ }
+
+ static Path getRemoteAppLogDir(Path remoteRootLogDir,
+ ApplicationId appId) {
+ return new Path(remoteRootLogDir, ConverterUtils.toString(appId));
+ }
+
+ @Override
+ public synchronized void stop() {
+ LOG.info(this.getName() + " waiting for pending aggregation during exit");
+ for (AppLogAggregator appLogAggregator : this.appLogAggregators.values()) {
+ appLogAggregator.join();
+ }
+ super.stop();
+ }
+
+ private void initApp(final ApplicationId appId, String user,
+ Credentials credentials, ContainerLogsRetentionPolicy logRetentionPolicy) {
+
+ // Get user's FileSystem credentials
+ UserGroupInformation userUgi =
+ UserGroupInformation.createRemoteUser(user);
+ if (credentials != null) {
+ for (Token<? extends TokenIdentifier> token : credentials
+ .getAllTokens()) {
+ userUgi.addToken(token);
+ }
+ }
+
+ // New application
+ AppLogAggregator appLogAggregator =
+ new AppLogAggregatorImpl(this.deletionService, getConfig(), appId,
+ userUgi, this.localRootLogDirs,
+ getRemoteNodeLogFileForApp(appId), logRetentionPolicy);
+ if (this.appLogAggregators.putIfAbsent(appId, appLogAggregator) != null) {
+ throw new YarnException("Duplicate initApp for " + appId);
+ }
+
+ // Create the app dir
+ try {
+ userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // TODO: Reuse FS for user?
+ FileSystem remoteFS = FileSystem.get(getConfig());
+ remoteFS.mkdirs(getRemoteAppLogDir(
+ LogAggregationService.this.remoteRootLogDir, appId)
+ .makeQualified(remoteFS.getUri(),
+ remoteFS.getWorkingDirectory()));
+ return null;
+ }
+ });
+ } catch (Exception e) {
+ throw new YarnException(e);
+ }
+
+ // TODO: Get the user configuration for the list of containers that
+ // need log aggregation.
+
+ // Schedule the aggregator.
+ this.threadPool.execute(appLogAggregator);
+ }
+
+ private void stopContainer(ContainerId containerId, String exitCode) {
+
+ // A container is complete. Put this container's logs up for aggregation
+ // if they are needed.
+
+ if (!this.appLogAggregators.containsKey(containerId.getAppId())) {
+ throw new YarnException("Application is not initialized yet for "
+ + containerId);
+ }
+ this.appLogAggregators.get(containerId.getAppId())
+ .startContainerLogAggregation(containerId, exitCode.equals("0"));
+ }
+
+ private void stopApp(ApplicationId appId) {
+
+ // App is complete. Finish up any containers' pending log aggregation
+ // and close the application-specific logFile.
+
+ if (!this.appLogAggregators.containsKey(appId)) {
+ throw new YarnException("Application is not initialized yet for "
+ + appId);
+ }
+ this.appLogAggregators.get(appId).finishLogAggregation();
+ }
+
+ @Override
+ public void handle(LogAggregatorEvent event) {
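+ // NOTE: event dispatch is currently a no-op; the intended switch is
+ // kept below, commented out.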
+// switch (event.getType()) {
+// case APPLICATION_STARTED:
+// LogAggregatorAppStartedEvent appStartEvent =
+// (LogAggregatorAppStartedEvent) event;
+// initApp(appStartEvent.getApplicationId(), appStartEvent.getUser(),
+// appStartEvent.getCredentials(),
+// appStartEvent.getLogRetentionPolicy());
+// break;
+// case CONTAINER_FINISHED:
+// LogAggregatorContainerFinishedEvent containerFinishEvent =
+// (LogAggregatorContainerFinishedEvent) event;
+// stopContainer(containerFinishEvent.getContainerId(),
+// containerFinishEvent.getExitCode());
+// break;
+// case APPLICATION_FINISHED:
+// LogAggregatorAppFinishedEvent appFinishedEvent =
+// (LogAggregatorAppFinishedEvent) event;
+// stopApp(appFinishedEvent.getApplicationId());
+// break;
+// default:
+// ; // Ignore
+// }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogDumper.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogDumper.java
new file mode 100644
index 0000000..1caf36e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogDumper.java
@@ -0,0 +1,197 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.IOException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat.LogKey;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat.LogReader;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class LogDumper extends Configured implements Tool {
+
+ private static final String CONTAINER_ID_OPTION = "containerId";
+ private static final String APPLICATION_ID_OPTION = "applicationId";
+ private static final String NODE_ADDRESS_OPTION = "nodeAddress";
+
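+ // Illustrative usage (all option values are hypothetical):
+ // <launcher> LogDumper -applicationId <appId>
+ // dumps the logs of every container of the application;
+ // <launcher> LogDumper -applicationId <appId> \
+ // -nodeAddress <host_port> -containerId <containerId>
+ // dumps the logs of a single container from that node's log file.
+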
+ @Override
+ public int run(String[] args) throws Exception {
+
+ Options opts = new Options();
+ opts.addOption(APPLICATION_ID_OPTION, true, "ApplicationId");
+ opts.addOption(CONTAINER_ID_OPTION, true, "ContainerId");
+ opts.addOption(NODE_ADDRESS_OPTION, true, "NodeAddress");
+
+ if (args.length < 1) {
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("general options are: ", opts);
+ return -1;
+ }
+
+ CommandLineParser parser = new GnuParser();
+ String appIdStr = null;
+ String containerIdStr = null;
+ String nodeAddress = null;
+ try {
+ CommandLine commandLine = parser.parse(opts, args, true);
+ appIdStr = commandLine.getOptionValue(APPLICATION_ID_OPTION);
+ containerIdStr = commandLine.getOptionValue(CONTAINER_ID_OPTION);
+ nodeAddress = commandLine.getOptionValue(NODE_ADDRESS_OPTION);
+ } catch (ParseException e) {
+ System.out.println("options parsing failed: " + e.getMessage());
+
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("general options are: ", opts);
+ return -1;
+ }
+
+ if (appIdStr == null) {
+ System.out.println("ApplicationId cannot be null!");
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("general options are: ", opts);
+ return -1;
+ }
+
+ RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(getConf());
+ ApplicationId appId =
+ ConverterUtils.toApplicationId(recordFactory, appIdStr);
+
+ DataOutputStream out = new DataOutputStream(System.out);
+
+ if (containerIdStr == null && nodeAddress == null) {
+ dumpAllContainersLogs(appId, out);
+ } else if ((containerIdStr == null && nodeAddress != null)
+ || (containerIdStr != null && nodeAddress == null)) {
+ System.out.println("ContainerId and NodeAddress must be specified together!");
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("general options are: ", opts);
+ return -1;
+ } else {
+ Path remoteRootLogDir =
+ new Path(getConf().get(NMConfig.REMOTE_USER_LOG_DIR,
+ NMConfig.DEFAULT_REMOTE_APP_LOG_DIR));
+ AggregatedLogFormat.LogReader reader =
+ new AggregatedLogFormat.LogReader(getConf(),
+ LogAggregationService.getRemoteNodeLogFileForApp(
+ remoteRootLogDir, appId, nodeAddress));
+ return dumpAContainerLogs(containerIdStr, reader, out);
+ }
+
+ return 0;
+ }
+
+ private int dumpAContainerLogs(String containerIdStr,
+ AggregatedLogFormat.LogReader reader, DataOutputStream out)
+ throws IOException {
+ DataInputStream valueStream;
+ LogKey key = new LogKey();
+ valueStream = reader.next(key);
+
+ while (valueStream != null && !key.toString().equals(containerIdStr)) {
+ // Next container
+ key = new LogKey();
+ valueStream = reader.next(key);
+ }
+
+ if (valueStream == null) {
+ System.out.println("Logs for container " + containerIdStr
+ + " are not present in this log-file.");
+ return -1;
+ }
+
+ while (true) {
+ try {
+ LogReader.readAContainerLogsForALogType(valueStream, out);
+ } catch (EOFException eof) {
+ break;
+ }
+ }
+ return 0;
+ }
+
+ private void dumpAllContainersLogs(ApplicationId appId,
+ DataOutputStream out) throws IOException {
+ Path remoteRootLogDir =
+ new Path(getConf().get(NMConfig.REMOTE_USER_LOG_DIR,
+ NMConfig.DEFAULT_REMOTE_APP_LOG_DIR));
+ Path remoteAppLogDir =
+ LogAggregationService.getRemoteAppLogDir(remoteRootLogDir, appId);
+ RemoteIterator<FileStatus> nodeFiles =
+ FileContext.getFileContext().listStatus(remoteAppLogDir);
+ while (nodeFiles.hasNext()) {
+ FileStatus thisNodeFile = nodeFiles.next();
+ AggregatedLogFormat.LogReader reader =
+ new AggregatedLogFormat.LogReader(getConf(),
+ LogAggregationService.getRemoteNodeLogFileForApp(
+ remoteRootLogDir, appId, thisNodeFile.getPath().getName()));
+ try {
+
+ DataInputStream valueStream;
+ LogKey key = new LogKey();
+ valueStream = reader.next(key);
+
+ while (valueStream != null) {
+ while (true) {
+ try {
+ LogReader.readAContainerLogsForALogType(valueStream, out);
+ } catch (EOFException eof) {
+ break;
+ }
+ }
+
+ // Next container
+ key = new LogKey();
+ valueStream = reader.next(key);
+ }
+ } finally {
+ reader.close();
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ Configuration conf = new YarnConfiguration();
+ LogDumper logDumper = new LogDumper();
+ logDumper.setConf(conf);
+ System.exit(logDumper.run(args)); // propagate the exit status
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorAppFinishedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorAppFinishedEvent.java
new file mode 100644
index 0000000..5ff2e21
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorAppFinishedEvent.java
@@ -0,0 +1,36 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public class LogAggregatorAppFinishedEvent extends LogAggregatorEvent {
+
+ private final ApplicationId applicationId;
+
+ public LogAggregatorAppFinishedEvent(ApplicationId appId) {
+ super(LogAggregatorEventType.APPLICATION_FINISHED);
+ this.applicationId = appId;
+ }
+
+ public ApplicationId getApplicationId() {
+ return this.applicationId;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorAppStartedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorAppStartedEvent.java
new file mode 100644
index 0000000..0b8c829
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorAppStartedEvent.java
@@ -0,0 +1,57 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event;
+
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.ContainerLogsRetentionPolicy;
+
+public class LogAggregatorAppStartedEvent extends LogAggregatorEvent {
+
+ private final ApplicationId applicationId;
+ private final ContainerLogsRetentionPolicy retentionPolicy;
+ private final String user;
+ private final Credentials credentials;
+
+ public LogAggregatorAppStartedEvent(ApplicationId appId, String user,
+ Credentials credentials, ContainerLogsRetentionPolicy retentionPolicy) {
+ super(LogAggregatorEventType.APPLICATION_STARTED);
+ this.applicationId = appId;
+ this.user = user;
+ this.credentials = credentials;
+ this.retentionPolicy = retentionPolicy;
+ }
+
+ public ApplicationId getApplicationId() {
+ return this.applicationId;
+ }
+
+ public Credentials getCredentials() {
+ return this.credentials;
+ }
+
+ public ContainerLogsRetentionPolicy getLogRetentionPolicy() {
+ return this.retentionPolicy;
+ }
+
+ public String getUser() {
+ return this.user;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorContainerFinishedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorContainerFinishedEvent.java
new file mode 100644
index 0000000..117d30c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorContainerFinishedEvent.java
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class LogAggregatorContainerFinishedEvent extends LogAggregatorEvent {
+
+ private final ContainerId containerId;
+ private final String exitCode;
+
+ public LogAggregatorContainerFinishedEvent(ContainerId containerId,
+ String exitCode) {
+ super(LogAggregatorEventType.CONTAINER_FINISHED);
+ this.containerId = containerId;
+ this.exitCode = exitCode;
+ }
+
+ public ContainerId getContainerId() {
+ return this.containerId;
+ }
+
+ public String getExitCode() {
+ return this.exitCode;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorEvent.java
new file mode 100644
index 0000000..052d080
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorEvent.java
@@ -0,0 +1,29 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class LogAggregatorEvent extends AbstractEvent<LogAggregatorEventType>{
+
+ public LogAggregatorEvent(LogAggregatorEventType type) {
+ super(type);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorEventType.java
new file mode 100644
index 0000000..64adf74
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorEventType.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event;
+
+public enum LogAggregatorEventType {
+ APPLICATION_STARTED, CONTAINER_FINISHED, APPLICATION_FINISHED
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerStartMonitoringEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerStartMonitoringEvent.java
new file mode 100644
index 0000000..d217ab8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerStartMonitoringEvent.java
@@ -0,0 +1,25 @@
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class ContainerStartMonitoringEvent extends ContainersMonitorEvent {
+
+ private final long vmemLimit;
+ private final long pmemLimit;
+
+ public ContainerStartMonitoringEvent(ContainerId containerId,
+ long vmemLimit, long pmemLimit) {
+ super(containerId, ContainersMonitorEventType.START_MONITORING_CONTAINER);
+ this.vmemLimit = vmemLimit;
+ this.pmemLimit = pmemLimit;
+ }
+
+ public long getVmemLimit() {
+ return this.vmemLimit;
+ }
+
+ public long getPmemLimit() {
+ return this.pmemLimit;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerStopMonitoringEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerStopMonitoringEvent.java
new file mode 100644
index 0000000..37e6c5b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerStopMonitoringEvent.java
@@ -0,0 +1,11 @@
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+public class ContainerStopMonitoringEvent extends ContainersMonitorEvent {
+
+ public ContainerStopMonitoringEvent(ContainerId containerId) {
+ super(containerId, ContainersMonitorEventType.STOP_MONITORING_CONTAINER);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitor.java
new file mode 100644
index 0000000..e54f430
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitor.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
+
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
+import org.apache.hadoop.yarn.service.Service;
+
+public interface ContainersMonitor extends Service,
+ EventHandler<ContainersMonitorEvent>, ResourceView {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorEvent.java
new file mode 100644
index 0000000..56e578b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorEvent.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class ContainersMonitorEvent extends
+ AbstractEvent<ContainersMonitorEventType> {
+
+ private final ContainerId containerId;
+
+ public ContainersMonitorEvent(ContainerId containerId,
+ ContainersMonitorEventType eventType) {
+ super(eventType);
+ this.containerId = containerId;
+ }
+
+ public ContainerId getContainerId() {
+ return this.containerId;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorEventType.java
new file mode 100644
index 0000000..be99651
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorEventType.java
@@ -0,0 +1,24 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
+
+public enum ContainersMonitorEventType {
+ START_MONITORING_CONTAINER,
+ STOP_MONITORING_CONTAINER
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
new file mode 100644
index 0000000..64690df
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -0,0 +1,517 @@
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+
+public class ContainersMonitorImpl extends AbstractService implements
+ ContainersMonitor {
+
+ final static Log LOG = LogFactory
+ .getLog(ContainersMonitorImpl.class);
+
+ private final static String MONITORING_INTERVAL_CONFIG_KEY =
+ NMConfig.NM_PREFIX + "containers-monitor.monitoring-interval";
+ public static final String RESOURCE_CALCULATOR_PLUGIN_CONFIG_KEY =
+ NMConfig.NM_PREFIX + "containers-monitor.resourcecalculatorplugin";
+ public static final String NM_RESERVED_PHYSICALMEMORY_MB =
+ NMConfig.NM_PREFIX + "reserved-physical-memory.mb";
+
+ private final static int MONITORING_INTERVAL_DEFAULT = 3000;
+ private long monitoringInterval;
+ private MonitoringThread monitoringThread;
+
+ final List<ContainerId> containersToBeRemoved;
+ final Map<ContainerId, ProcessTreeInfo> containersToBeAdded;
+ Map<ContainerId, ProcessTreeInfo> trackingContainers =
+ new HashMap<ContainerId, ProcessTreeInfo>();
+
+ final ContainerExecutor containerExecutor;
+ private final Dispatcher eventDispatcher;
+ private final Context context;
+ private ResourceCalculatorPlugin resourceCalculatorPlugin;
+
+ private long maxVmemAllottedForContainers = DISABLED_MEMORY_LIMIT;
+ private long maxPmemAllottedForContainers = DISABLED_MEMORY_LIMIT;
+
+ /**
+ * A value which if set for memory related configuration options, indicates
+ * that the options are turned off.
+ */
+ public static final long DISABLED_MEMORY_LIMIT = -1L;
+
+ private static final String MEMORY_USAGE_STRING =
+ "Memory usage of ProcessTree %s for container-id %s : Virtual %d bytes, "
+ +
+ "limit : %d bytes; Physical %d bytes, limit %d bytes";
+
+ public ContainersMonitorImpl(ContainerExecutor exec,
+ AsyncDispatcher dispatcher, Context context) {
+ super("containers-monitor");
+
+ this.containerExecutor = exec;
+ this.eventDispatcher = dispatcher;
+ this.context = context;
+
+ this.containersToBeAdded = new HashMap<ContainerId, ProcessTreeInfo>();
+ this.containersToBeRemoved = new ArrayList<ContainerId>();
+ this.monitoringThread = new MonitoringThread();
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+ this.monitoringInterval =
+ conf.getLong(MONITORING_INTERVAL_CONFIG_KEY,
+ MONITORING_INTERVAL_DEFAULT);
+
+ Class<? extends ResourceCalculatorPlugin> clazz =
+ conf.getClass(RESOURCE_CALCULATOR_PLUGIN_CONFIG_KEY, null,
+ ResourceCalculatorPlugin.class);
+ this.resourceCalculatorPlugin =
+ ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
+ LOG.info(" Using ResourceCalculatorPlugin : "
+ + this.resourceCalculatorPlugin);
+
+ long totalPhysicalMemoryOnNM = DISABLED_MEMORY_LIMIT;
+ if (this.resourceCalculatorPlugin != null) {
+ totalPhysicalMemoryOnNM =
+ this.resourceCalculatorPlugin.getPhysicalMemorySize();
+ if (totalPhysicalMemoryOnNM <= 0) {
+ LOG.warn("NodeManager's totalPmem could not be calculated. "
+ + "Setting it to " + DISABLED_MEMORY_LIMIT);
+ totalPhysicalMemoryOnNM = DISABLED_MEMORY_LIMIT;
+ }
+ }
+
+ // ///////// Virtual memory configuration //////
+ this.maxVmemAllottedForContainers =
+ conf.getLong(NMConfig.NM_VMEM_GB, NMConfig.DEFAULT_NM_VMEM_GB);
+ this.maxVmemAllottedForContainers =
+ this.maxVmemAllottedForContainers * 1024 * 1024 * 1024L; //Normalize
+
+ if (this.maxVmemAllottedForContainers > totalPhysicalMemoryOnNM) {
+ LOG.info("totalMemoryAllottedForContainers > totalPhysicalMemoryOnNM."
+ + " Thrashing might happen.");
+ }
+
+ // ///////// Physical memory configuration //////
+ long reservedPmemOnNM =
+ conf.getLong(NM_RESERVED_PHYSICALMEMORY_MB, DISABLED_MEMORY_LIMIT);
+ reservedPmemOnNM =
+ reservedPmemOnNM == DISABLED_MEMORY_LIMIT
+ ? DISABLED_MEMORY_LIMIT
+ : reservedPmemOnNM * 1024 * 1024; // normalize to bytes
+
+ if (reservedPmemOnNM == DISABLED_MEMORY_LIMIT
+ || totalPhysicalMemoryOnNM == DISABLED_MEMORY_LIMIT) {
+ this.maxPmemAllottedForContainers = DISABLED_MEMORY_LIMIT;
+ } else {
+ this.maxPmemAllottedForContainers =
+ totalPhysicalMemoryOnNM - reservedPmemOnNM;
+ }
+
+ super.init(conf);
+ }
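+
+ // Worked example of the normalization above (illustrative numbers):
+ // NM_VMEM_GB = 8 gives a vmem ceiling of 8 * 1024^3 = 8589934592 bytes;
+ // on a node with 16GB of physical memory and
+ // reserved-physical-memory.mb = 2048, containers are allotted
+ // 16GB - 2GB = 14GB of physical memory.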
+
+ /**
+ * Is the total physical memory check enabled?
+ *
+ * @return true if total physical memory check is enabled.
+ */
+ boolean isPhysicalMemoryCheckEnabled() {
+ return this.maxPmemAllottedForContainers != DISABLED_MEMORY_LIMIT;
+ }
+
+ /**
+ * Is the total virtual memory check enabled?
+ *
+ * @return true if total virtual memory check is enabled.
+ */
+ boolean isVirtualMemoryCheckEnabled() {
+ return this.maxVmemAllottedForContainers != DISABLED_MEMORY_LIMIT;
+ }
+
+ private boolean isEnabled() {
+ if (!ProcfsBasedProcessTree.isAvailable()) {
+ LOG.info("ProcessTree implementation is missing on this system. "
+ + this.getClass().getName() + " is disabled.");
+ return false;
+ }
+
+ if (!(isPhysicalMemoryCheckEnabled() || isVirtualMemoryCheckEnabled())) {
+ LOG.info("Neither virutal-memory nor physical-memory monitoring is " +
+ "needed. Not running the monitor-thread");
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public synchronized void start() {
+ if (this.isEnabled()) {
+ this.monitoringThread.start();
+ }
+ super.start();
+ }
+
+ @Override
+ public synchronized void stop() {
+ if (this.isEnabled()) {
+ this.monitoringThread.interrupt();
+ try {
+ this.monitoringThread.join();
+ } catch (InterruptedException e) {
+ LOG.info("Interrupted while joining the monitoring thread");
+ Thread.currentThread().interrupt(); // preserve the interrupt status
+ }
+ }
+ super.stop();
+ }
+
+ private static class ProcessTreeInfo {
+ private ContainerId containerId;
+ private String pid;
+ private ProcfsBasedProcessTree pTree;
+ private long vmemLimit;
+ private long pmemLimit;
+
+ public ProcessTreeInfo(ContainerId containerId, String pid,
+ ProcfsBasedProcessTree pTree, long vmemLimit, long pmemLimit) {
+ this.containerId = containerId;
+ this.pid = pid;
+ this.pTree = pTree;
+ this.vmemLimit = vmemLimit;
+ this.pmemLimit = pmemLimit;
+ }
+
+ public ContainerId getContainerId() {
+ return this.containerId;
+ }
+
+ public String getPID() {
+ return this.pid;
+ }
+
+ public void setPid(String pid) {
+ this.pid = pid;
+ }
+
+ public ProcfsBasedProcessTree getProcessTree() {
+ return this.pTree;
+ }
+
+ public void setProcessTree(ProcfsBasedProcessTree pTree) {
+ this.pTree = pTree;
+ }
+
+ public long getVmemLimit() {
+ return this.vmemLimit;
+ }
+
+ /**
+ * @return Physical memory limit for the process tree in bytes
+ */
+ public long getPmemLimit() {
+ return this.pmemLimit;
+ }
+ }
+
+
+ /**
+ * Check whether a container's process tree's current memory usage is over
+ * limit.
+ *
+ * When a Java process exec's a program, it can momentarily account for
+ * double its memory size, because the JVM does a fork()+exec(), and the
+ * fork creates a copy of the parent's address space. If the monitoring
+ * thread samples the container tree's memory at that instant, it could
+ * wrongly conclude the tree is over its limit and kill it, through no
+ * fault of the process itself.
+ *
+ * We counter this with a heuristic check:
+ * - if a process tree uses more than twice the memory limit, it is
+ * killed immediately;
+ * - if processes in the tree older than one monitoring interval exceed
+ * the memory limit at all, the tree is killed. Otherwise it is given
+ * the benefit of the doubt for one more iteration.
+ *
+ * @param containerId
+ * Container Id for the container tree
+ * @param currentMemUsage
+ * Memory usage of a container tree
+ * @param curMemUsageOfAgedProcesses
+ * Memory usage of processes older than an iteration in a container
+ * tree
+ * @param vmemLimit
+ * The limit specified for the container
+ * @return true if the memory usage is more than twice the specified limit,
+ * or if processes in the tree, older than this thread's monitoring
+ * interval, exceed the memory limit. False, otherwise.
+ */
+ boolean isProcessTreeOverLimit(String containerId,
+ long currentMemUsage,
+ long curMemUsageOfAgedProcesses,
+ long vmemLimit) {
+ boolean isOverLimit = false;
+
+ if (currentMemUsage > (2 * vmemLimit)) {
+ LOG.warn("Process tree for container: " + containerId
+ + " running over twice " + "the configured limit. Limit=" + vmemLimit
+ + ", current usage = " + currentMemUsage);
+ isOverLimit = true;
+ } else if (curMemUsageOfAgedProcesses > vmemLimit) {
+ LOG.warn("Process tree for container: " + containerId
+ + " has processes older than 1 "
+ + "iteration running over the configured limit. Limit=" + vmemLimit
+ + ", current usage = " + curMemUsageOfAgedProcesses);
+ isOverLimit = true;
+ }
+
+ return isOverLimit;
+ }
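+
+ // Illustrative example (hypothetical numbers): with vmemLimit = 1 GB,
+ // a tree at 2.5 GB is killed at once (over twice the limit); a tree
+ // whose processes older than one iteration total 1.2 GB is also killed;
+ // a momentary fork()+exec() spike to 1.8 GB from young processes
+ // survives one more iteration.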
+
+ // Method provided for testing purposes.
+ boolean isProcessTreeOverLimit(ProcfsBasedProcessTree pTree,
+ String containerId, long limit) {
+ long currentMemUsage = pTree.getCumulativeVmem();
+ // as processes begin with age 1, we want to see if there are processes
+ // more than 1 iteration old.
+ long curMemUsageOfAgedProcesses = pTree.getCumulativeVmem(1);
+ return isProcessTreeOverLimit(containerId, currentMemUsage,
+ curMemUsageOfAgedProcesses, limit);
+ }
+
+ private class MonitoringThread extends Thread {
+ public MonitoringThread() {
+
+ }
+
+ @Override
+ public void run() {
+
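+ // Each iteration: sync in newly started containers, drop finished
+ // ones, check every tracked process tree against its limits, and
+ // then sleep for monitoringInterval.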
+ while (true) {
+
+ // Print the processTrees for debugging.
+ if (LOG.isDebugEnabled()) {
+ StringBuilder tmp = new StringBuilder("[ ");
+ for (ProcessTreeInfo p : trackingContainers.values()) {
+ tmp.append(p.getPID());
+ tmp.append(" ");
+ }
+ LOG.debug("Current ProcessTree list : "
+ + tmp.toString() + "]");
+ }
+
+ // Add new containers
+ synchronized (containersToBeAdded) {
+ for (Entry<ContainerId, ProcessTreeInfo> entry : containersToBeAdded
+ .entrySet()) {
+ ContainerId containerId = entry.getKey();
+ ProcessTreeInfo processTreeInfo = entry.getValue();
+ LOG.info("Starting resource-monitoring for " + containerId);
+ trackingContainers.put(containerId, processTreeInfo);
+ }
+ containersToBeAdded.clear();
+ }
+
+ // Remove finished containers
+ synchronized (containersToBeRemoved) {
+ for (ContainerId containerId : containersToBeRemoved) {
+ trackingContainers.remove(containerId);
+ LOG.info("Stopping resource-monitoring for " + containerId);
+ }
+ containersToBeRemoved.clear();
+ }
+
+ // Now do the monitoring for the trackingContainers
+ // Check memory usage and kill any overflowing containers
+ long vmemStillInUsage = 0;
+ long pmemStillInUsage = 0;
+ for (Iterator<Map.Entry<ContainerId, ProcessTreeInfo>> it =
+ trackingContainers.entrySet().iterator(); it.hasNext();) {
+
+ Map.Entry<ContainerId, ProcessTreeInfo> entry = it.next();
+ ContainerId containerId = entry.getKey();
+ ProcessTreeInfo ptInfo = entry.getValue();
+ try {
+ String pId = ptInfo.getPID();
+
+ // Initialize any uninitialized processTrees
+ if (pId == null) {
+ // Get the pid from the ContainerId; it will be null either if the
+ // container is not spawned yet or if the container's pid has been
+ // removed from the ContainerExecutor.
+ pId = containerExecutor.getProcessId(ptInfo.getContainerId());
+ if (pId != null) {
+ LOG.debug("Tracking ProcessTree " + pId
+ + " for the first time");
+
+ ProcfsBasedProcessTree pt =
+ new ProcfsBasedProcessTree(pId,
+ ContainerExecutor.isSetsidAvailable);
+ ptInfo.setPid(pId);
+ ptInfo.setProcessTree(pt);
+ }
+ }
+ // End of initializing any uninitialized processTrees
+
+ if (pId == null) {
+ continue; // processTree cannot be tracked
+ }
+
+ LOG.debug("Constructing ProcessTree for : PID = " + pId
+ + " ContainerId = " + containerId);
+ ProcfsBasedProcessTree pTree = ptInfo.getProcessTree();
+ pTree = pTree.getProcessTree(); // get the updated process-tree
+ ptInfo.setProcessTree(pTree); // update ptInfo with the refreshed
+ // process-tree state
+ long currentVmemUsage = pTree.getCumulativeVmem();
+ long currentPmemUsage = pTree.getCumulativeRssmem();
+ // as processes begin with age 1, we want to see if there
+ // are processes more than 1 iteration old.
+ long curMemUsageOfAgedProcesses = pTree.getCumulativeVmem(1);
+ long curRssMemUsageOfAgedProcesses = pTree.getCumulativeRssmem(1);
+ long vmemLimit = ptInfo.getVmemLimit();
+ long pmemLimit = ptInfo.getPmemLimit();
+ LOG.info(String.format(MEMORY_USAGE_STRING, pId,
+ containerId.toString(), currentVmemUsage, vmemLimit,
+ currentPmemUsage, pmemLimit));
+
+ boolean isMemoryOverLimit = false;
+ String msg = "";
+ if (isVirtualMemoryCheckEnabled()
+ && isProcessTreeOverLimit(containerId.toString(),
+ currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) {
+ // Container (the root process) is still alive and overflowing
+ // memory.
+ // Dump the process-tree and then clean it up.
+ msg =
+ "Container [pid="
+ + pId
+ + ",containerID="
+ + containerId
+ + "] is running beyond memory-limits. Current usage : "
+ + currentVmemUsage
+ + "bytes. Limit : "
+ + vmemLimit
+ + "bytes. Killing container. "
+ + "\nDump of the process-tree for " + containerId
+ + " : \n" + pTree.getProcessTreeDump();
+ isMemoryOverLimit = true;
+ } else if (isPhysicalMemoryCheckEnabled()
+ && isProcessTreeOverLimit(containerId.toString(),
+ currentPmemUsage, curRssMemUsageOfAgedProcesses,
+ pmemLimit)) {
+ // Container (the root process) is still alive and overflowing
+ // memory.
+ // Dump the process-tree and then clean it up.
+ msg =
+ "Container [pid="
+ + pId
+ + ",tipID="
+ + containerId
+ + "] is running beyond physical memory-limits."
+ + " Current usage : "
+ + currentPmemUsage
+ + "bytes. Limit : "
+ + pmemLimit
+ + "bytes. Killing container. \nDump of the process-tree for "
+ + containerId + " : \n" + pTree.getProcessTreeDump();
+ isMemoryOverLimit = true;
+ }
+
+ if (isMemoryOverLimit) {
+ // Virtual or physical memory over limit. Fail the container and
+ // remove the corresponding process tree.
+ LOG.warn(msg);
+ // Log an error if the root process is not a process group leader.
+ if (!pTree.checkPidPgrpidForMatch()) {
+ LOG.error("Killing container process with PID " + pId
+ + ", but it is not a process group leader.");
+ }
+ // kill the container
+ eventDispatcher.getEventHandler().handle(
+ new ContainerKillEvent(containerId, msg));
+ it.remove();
+ LOG.info("Removed ProcessTree with root " + pId);
+ } else {
+ // Account for the total memory in use by all containers that are
+ // still alive and within limits.
+ vmemStillInUsage += currentVmemUsage;
+ pmemStillInUsage += currentPmemUsage;
+ }
+ } catch (Exception e) {
+ // Log the exception and proceed to the next container.
+ LOG.warn("Uncaught exception in ContainerMemoryManager "
+ + "while managing memory of " + containerId, e);
+ }
+ }
+
+ try {
+ Thread.sleep(monitoringInterval);
+ } catch (InterruptedException e) {
+ LOG.warn(ContainersMonitorImpl.class.getName()
+ + " is interrupted. Exiting.");
+ break;
+ }
+ }
+ }
+ }
+
+ @Override
+ public long getVmemAllocatedForContainers() {
+ return this.maxVmemAllottedForContainers;
+ }
+
+ @Override
+ public long getPmemAllocatedForContainers() {
+ return this.maxPmemAllottedForContainers;
+ }
+
+ @Override
+ public void handle(ContainersMonitorEvent monitoringEvent) {
+
+ if (!isEnabled()) {
+ return;
+ }
+
+ ContainerId containerId = monitoringEvent.getContainerId();
+ switch (monitoringEvent.getType()) {
+ case START_MONITORING_CONTAINER:
+ ContainerStartMonitoringEvent startEvent =
+ (ContainerStartMonitoringEvent) monitoringEvent;
+ synchronized (this.containersToBeAdded) {
+ ProcessTreeInfo processTreeInfo =
+ new ProcessTreeInfo(containerId, null, null,
+ startEvent.getVmemLimit(), startEvent.getPmemLimit());
+ this.containersToBeAdded.put(containerId, processTreeInfo);
+ }
+ break;
+ case STOP_MONITORING_CONTAINER:
+ synchronized (this.containersToBeRemoved) {
+ this.containersToBeRemoved.add(containerId);
+ }
+ break;
+ default:
+ // TODO: Handle unexpected event types.
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java
new file mode 100644
index 0000000..4d62247
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.metrics;
+
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+@Metrics(about="Metrics for node manager", context="yarn")
+public class NodeManagerMetrics {
+ @Metric MutableCounterInt containersLaunched;
+ @Metric MutableCounterInt containersCompleted;
+ @Metric MutableCounterInt containersFailed;
+ @Metric MutableCounterInt containersKilled;
+ @Metric("# of initializing containers")
+ MutableGaugeInt containersIniting;
+ @Metric MutableGaugeInt containersRunning;
+ @Metric("Current allocated memory in GB")
+ MutableGaugeInt allocatedGB;
+ @Metric("Current # of allocated containers")
+ MutableGaugeInt allocatedContainers;
+ @Metric MutableGaugeInt availableGB;
+
+ public static NodeManagerMetrics create() {
+ return create(DefaultMetricsSystem.instance());
+ }
+
+ static NodeManagerMetrics create(MetricsSystem ms) {
+ JvmMetrics.create("NodeManager", null, ms);
+ return ms.register(new NodeManagerMetrics());
+ }
+
+ // Potential instrumentation interface methods
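+ // Typical usage (sketch): the NodeManager holds a single instance from
+ // create() and calls e.g. launchedContainer() or
+ // allocateContainer(resource) as container lifecycle events occur.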
+
+ public void launchedContainer() {
+ containersLaunched.incr();
+ }
+
+ public void completedContainer() {
+ containersCompleted.incr();
+ }
+
+ public void failedContainer() {
+ containersFailed.incr();
+ }
+
+ public void killedContainer() {
+ containersKilled.incr();
+ }
+
+ public void initingContainer() {
+ containersIniting.incr();
+ }
+
+ public void endInitingContainer() {
+ containersIniting.decr();
+ }
+
+ public void runningContainer() {
+ containersRunning.incr();
+ }
+
+ public void endRunningContainer() {
+ containersRunning.decr();
+ }
+
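+ // Note: the GB gauges use integer division, so (assuming
+ // Resource.getMemory() reports megabytes) a container smaller than
+ // 1024 MB, e.g. 512 MB, leaves allocatedGB and availableGB unchanged.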
+ public void allocateContainer(Resource res) {
+ allocatedContainers.incr();
+ allocatedGB.incr(res.getMemory() / 1024);
+ availableGB.decr(res.getMemory() / 1024);
+ }
+
+ public void releaseContainer(Resource res) {
+ allocatedContainers.decr();
+ allocatedGB.decr(res.getMemory() / 1024);
+ availableGB.incr(res.getMemory() / 1024);
+ }
+
+ public void addResource(Resource res) {
+ availableGB.incr(res.getMemory() / 1024);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
new file mode 100644
index 0000000..a35f81f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
@@ -0,0 +1,101 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import java.util.Map.Entry;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import com.google.inject.Inject;
+
+public class AllApplicationsPage extends NMView {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ setTitle("Applications running on this node");
+ set(DATATABLES_ID, "applications");
+ set(initID(DATATABLES, "applications"), appsTableInit());
+ setTableStyles(html, "applications");
+ }
+
+ private String appsTableInit() {
+ return tableInit().
+ // applicationid, applicationstate
+ append(", aoColumns:[null, null]} ").toString();
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return AllApplicationsBlock.class;
+ }
+
+ public static class AllApplicationsBlock extends HtmlBlock implements
+ NMWebParams {
+
+ private final Context nmContext;
+
+ @Inject
+ public AllApplicationsBlock(Context nmContext) {
+ this.nmContext = nmContext;
+ }
+
+ @Override
+ protected void render(Block html) {
+
+ TBODY<TABLE<BODY<Hamlet>>> tableBody =
+ html
+ .body()
+ .table("#applications")
+ .thead()
+ .tr()
+ .td()._("ApplicationId")._()
+ .td()._("ApplicationState")._()
+ ._()
+ ._()
+ .tbody();
+ for (Entry<ApplicationId, Application> entry : this.nmContext
+ .getApplications().entrySet()) {
+ ApplicationId appId = entry.getKey();
+ Application app = entry.getValue();
+ String appIdStr = ConverterUtils.toString(appId);
+ tableBody
+ .tr()
+ .td().a(url("application", appIdStr), appIdStr)._()
+ .td()._(app.getApplicationState())
+ ._()
+ ._();
+ }
+ tableBody._()._()._();
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
new file mode 100644
index 0000000..c5c007b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
@@ -0,0 +1,101 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import java.util.Map.Entry;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import com.google.inject.Inject;
+
+public class AllContainersPage extends NMView {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ setTitle("All containers running on this node");
+ set(DATATABLES_ID, "containers");
+ set(initID(DATATABLES, "containers"), containersTableInit());
+ setTableStyles(html, "containers");
+ }
+
+ private String containersTableInit() {
+ return tableInit().
+ // containerid, containerstate, log-url
+ append(", aoColumns:[null, null, {bSearchable:false}]} ").toString();
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return AllContainersBlock.class;
+ }
+
+ public static class AllContainersBlock extends HtmlBlock implements
+ NMWebParams {
+
+ private final Context nmContext;
+
+ @Inject
+ public AllContainersBlock(Context nmContext) {
+ this.nmContext = nmContext;
+ }
+
+ @Override
+ protected void render(Block html) {
+ TBODY<TABLE<BODY<Hamlet>>> tableBody = html.body()
+ .table("#containers")
+ .thead()
+ .tr()
+ .td()._("ContainerId")._()
+ .td()._("ContainerState")._()
+ .td()._("logs")._()
+ ._()
+ ._().tbody();
+ for (Entry<ContainerId, Container> entry : this.nmContext
+ .getContainers().entrySet()) {
+ ContainerId containerId = entry.getKey();
+ Container container = entry.getValue();
+ String containerIdStr = ConverterUtils.toString(containerId);
+ tableBody
+ .tr()
+ .td().a(url("container", containerIdStr), containerIdStr)
+ ._()
+ .td()._(container.getContainerState())._()
+ .td()
+ .a(url("containerlogs", containerIdStr), "logs")._()
+ ._();
+ }
+ tableBody._()._()._();
+ }
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
new file mode 100644
index 0000000..4fd26be
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
@@ -0,0 +1,100 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.webapp.view.InfoBlock;
+
+import com.google.inject.Inject;
+
+public class ApplicationPage extends NMView implements NMWebParams {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ set(DATATABLES_ID, "containers");
+ set(initID(DATATABLES, "containers"), containersTableInit());
+ setTableStyles(html, "containers");
+ }
+
+ private String containersTableInit() {
+ return tableInit().append(",aoColumns:[null]}").toString();
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return ApplicationBlock.class;
+ }
+
+ public static class ApplicationBlock extends HtmlBlock implements
+ NMWebParams {
+
+ private final Context nmContext;
+ private final Configuration conf;
+ private final RecordFactory recordFactory;
+
+ @Inject
+ public ApplicationBlock(Context nmContext, Configuration conf) {
+ this.conf = conf;
+ this.nmContext = nmContext;
+ this.recordFactory = RecordFactoryProvider.getRecordFactory(this.conf);
+ }
+
+ @Override
+ protected void render(Block html) {
+ ApplicationId applicationID =
+ ConverterUtils.toApplicationId(this.recordFactory,
+ $(APPLICATION_ID));
+ Application app = this.nmContext.getApplications().get(applicationID);
+ Map<ContainerId, Container> containers = app.getContainers();
+ info("Application's information")
+ ._("ApplicationId", ConverterUtils.toString(app.getAppId()))
+ ._("ApplicationState", app.getApplicationState().toString())
+ ._("User", app.getUser());
+ TABLE<Hamlet> containersListBody = html._(InfoBlock.class)
+ .table("#containers");
+ for (ContainerId containerId : containers.keySet()) {
+ String containerIdStr = ConverterUtils.toString(containerId);
+ containersListBody
+ .tr().td()
+ .a(url("container", containerIdStr), containerIdStr)
+ ._()._();
+ }
+ containersListBody._();
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
new file mode 100644
index 0000000..31fa4a5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
@@ -0,0 +1,197 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.DEFAULT_NM_LOG_DIR;
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_LOG_DIR;
+
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import com.google.inject.Inject;
+
+public class ContainerLogsPage extends NMView {
+ @Override
+ protected Class<? extends SubView> content() {
+ return ContainersLogsBlock.class;
+ }
+
+ public static class ContainersLogsBlock extends HtmlBlock implements
+ NMWebParams {
+
+ private final Configuration conf;
+ private final LocalDirAllocator logsSelector;
+ private final Context nmContext;
+ private final RecordFactory recordFactory;
+
+ @Inject
+ public ContainersLogsBlock(Configuration conf, Context context) {
+ this.conf = conf;
+ this.logsSelector = new LocalDirAllocator(NMConfig.NM_LOG_DIR);
+ this.nmContext = context;
+ this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
+ }
+
+ @Override
+ protected void render(Block html) {
+ DIV<Hamlet> div = html.div("#content");
+
+ ContainerId containerId =
+ ConverterUtils.toContainerId(this.recordFactory, $(CONTAINER_ID));
+ Container container = this.nmContext.getContainers().get(containerId);
+
+ if (container == null) {
+ div.h1(
+ "Unknown container. Container is either not yet running or "
+ + "has already completed or "
+ + "doesn't belong to this node at all.")._();
+ } else if (EnumSet.of(ContainerState.NEW,
+ ContainerState.LOCALIZING).contains(container.getContainerState())) {
+ div.h1("Container is not yet running. Current state is "
+ + container.getContainerState())
+ ._();
+ } else if (EnumSet.of(ContainerState.RUNNING,
+ ContainerState.EXITED_WITH_FAILURE,
+ ContainerState.EXITED_WITH_SUCCESS).contains(
+ container.getContainerState())) {
+
+ if (!$(CONTAINER_LOG_TYPE).isEmpty()) {
+ File logFile = null;
+ try {
+ logFile =
+ new File(this.logsSelector
+ .getLocalPathToRead(
+ ConverterUtils.toString(containerId.getAppId())
+ + Path.SEPARATOR + $(CONTAINER_ID)
+ + Path.SEPARATOR
+ + $(CONTAINER_LOG_TYPE), this.conf).toUri()
+ .getPath());
+ } catch (Exception e) {
+ div.h1("Cannot find this log on the local disk.")._();
+ return; // cannot render the log without the file
+ }
+ div.h1(logFile == null ? "Unknown LogFile" : logFile.getName());
+ long start =
+ $("start").isEmpty() ? -4 * 1024 : Long.parseLong($("start"));
+ start = start < 0 ? logFile.length() + start : start;
+ start = start < 0 ? 0 : start;
+ long end =
+ $("end").isEmpty() ? logFile.length() : Long
+ .parseLong($("end"));
+ end = end < 0 ? logFile.length() + end : end;
+ end = end < 0 ? logFile.length() : end;
+ if (start > end) {
+ writer().write("Invalid start and end values!");
+ } else {
+ try {
+ long toRead = end - start;
+ if (toRead < logFile.length()) {
+ div._("Showing " + toRead + " bytes. Click ")
+ .a(url("containerlogs", $(CONTAINER_ID),
+ logFile.getName()), "here")
+ ._(" for full log").br()._();
+ }
+ // TODO: Use secure IO Utils to avoid symlink attacks.
+ //TODO Fix findBugs close warning along with IOUtils change
+ FileReader reader = new FileReader(logFile);
+ char[] cbuf = new char[65536];
+ reader.skip(start);
+ int len = 0;
+ long totalRead = 0;
+ writer().write("<pre>");
+ // Read at most a buffer's worth per call, stopping once the
+ // requested range has been written out.
+ while (totalRead < toRead
+ && (len = reader.read(cbuf, 0,
+ (int) Math.min(cbuf.length, toRead - totalRead))) > 0) {
+ writer().write(cbuf, 0, len); // TODO: HTML-quote the output?
+ totalRead += len;
+ }
+ reader.close();
+ writer().write("</pre>");
+ } catch (IOException e) {
+ writer().write(
+ "Exception reading log-file "
+ + StringUtils.stringifyException(e));
+ }
+ }
+ div._();
+ } else {
+ // Just print out the log-types
+ List<File> containerLogsDirs =
+ getContainerLogDirs(this.conf, containerId);
+ for (File containerLogsDir : containerLogsDirs) {
+ for (File logFile : containerLogsDir.listFiles()) {
+ div
+ .p()
+ .a(
+ url("containerlogs", $(CONTAINER_ID),
+ logFile.getName(), "?start=-4076"),
+ logFile.getName() + " : Total file length is "
+ + logFile.length() + " bytes.")
+ ._();
+ }
+ }
+ div._();
+ }
+ } else {
+ div.h1("Container is no longer running..")._();
+ }
+ }
+
+ static List<File>
+ getContainerLogDirs(Configuration conf, ContainerId containerId) {
+ String[] logDirs =
+ conf.getStrings(NM_LOG_DIR, DEFAULT_NM_LOG_DIR);
+ List<File> containerLogDirs = new ArrayList<File>(logDirs.length);
+ for (String logDir : logDirs) {
+ String appIdStr = ConverterUtils.toString(containerId.getAppId());
+ File appLogDir = new File(logDir, appIdStr);
+ String containerIdStr = ConverterUtils.toString(containerId);
+ containerLogDirs.add(new File(appLogDir, containerIdStr));
+ }
+ return containerLogDirs;
+ }
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
new file mode 100644
index 0000000..1f53817
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
@@ -0,0 +1,84 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.webapp.view.InfoBlock;
+
+import com.google.inject.Inject;
+
+public class ContainerPage extends NMView implements NMWebParams {
+
+ @Override
+ protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ setTitle("Container " + $(CONTAINER_ID));
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return ContainerBlock.class;
+ }
+
+ public static class ContainerBlock extends HtmlBlock implements NMWebParams {
+
+ private final Configuration conf;
+ private final Context nmContext;
+ private final RecordFactory recordFactory;
+
+ @Inject
+ public ContainerBlock(Configuration conf, Context nmContext) {
+ this.conf = conf;
+ this.nmContext = nmContext;
+ this.recordFactory = RecordFactoryProvider.getRecordFactory(this.conf);
+ }
+
+ @Override
+ protected void render(Block html) {
+ ContainerId containerID =
+ ConverterUtils.toContainerId(this.recordFactory, $(CONTAINER_ID));
+ Container container = this.nmContext.getContainers().get(containerID);
+ ContainerStatus containerData = container.cloneAndGetContainerStatus();
+ info("Container information")
+ ._("ContainerID", $(CONTAINER_ID))
+ ._("ContainerState", container.getContainerState())
+ ._("ExitStatus", containerData.getExitStatus())
+ ._("Diagnostics", containerData.getDiagnostics())
+ ._("User", container.getUser())
+ ._("TotalMemoryNeeded",
+ container.getLaunchContext().getResource().getMemory())
+ ._("logs", ujoin("containerlogs", $(CONTAINER_ID)), "Link to logs");
+ html._(InfoBlock.class);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
new file mode 100644
index 0000000..ba8e41b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
@@ -0,0 +1,69 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.webapp.Controller;
+
+import com.google.inject.Inject;
+
+public class NMController extends Controller implements NMWebParams {
+
+ @Inject
+ public NMController(Configuration nmConf, RequestContext requestContext) {
+ super(requestContext);
+ }
+
+ @Override
+ // TODO: What is the use of this when info() is in place?
+ public void index() {
+ setTitle(join("NodeManager - ", $(NM_NODENAME)));
+ }
+
+ public void info() {
+ render(NodePage.class);
+ }
+
+ public void node() {
+ render(NodePage.class);
+ }
+
+ public void allApplications() {
+ render(AllApplicationsPage.class);
+ }
+
+ public void allContainers() {
+ render(AllContainersPage.class);
+ }
+
+ public void application() {
+ render(ApplicationPage.class);
+ }
+
+ public void container() {
+ render(ContainerPage.class);
+ }
+
+ public void logs() {
+ render(ContainerLogsPage.class);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java
new file mode 100644
index 0000000..dea36a9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.THEMESWITCHER_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
+
+public class NMView extends TwoColumnLayout {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ }
+
+ protected void commonPreHead(Page.HTML<_> html) {
+ html.meta_http("refresh", "10");
+ set(ACCORDION_ID, "nav");
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
+ set(THEMESWITCHER_ID, "themeswitcher");
+ }
+
+ @Override
+ protected Class<? extends SubView> nav() {
+ return NavBlock.class;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebParams.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebParams.java
new file mode 100644
index 0000000..2a205a2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebParams.java
@@ -0,0 +1,27 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+public interface NMWebParams {
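+ // Request-parameter names; web views read them via $(...).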
+ String NM_NODENAME = "nm.id";
+ String APPLICATION_ID = "nm.appId";
+ String CONTAINER_ID = "nm.containerId";
+ String CONTAINER_LOG_TYPE = "nm.containerLogType";
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
new file mode 100644
index 0000000..01ea4aa
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
@@ -0,0 +1,44 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+public class NavBlock extends HtmlBlock implements NMWebParams {
+
+ @Override
+ protected void render(Block html) {
+ html
+ .div("#nav")
+ .h3()._("NodeManager")._() // TODO: Problem if no header like this
+ .ul()
+ .li()
+ .a(url("node"), "Node Information")._()
+ .li()
+ .a(url("allApplications"), "List of Applications")
+ ._()
+ .li()
+ .a(url("allContainers"), "List of Containers")._()
+ ._()
+ .h3()._("Tools")._()
+ ._()
+ .div("#themeswitcher")._();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
new file mode 100644
index 0000000..064525e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
@@ -0,0 +1,81 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+
+import java.util.Date;
+
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.webapp.view.InfoBlock;
+
+import com.google.inject.Inject;
+
+public class NodePage extends NMView {
+
+ @Override
+ protected void commonPreHead(HTML<_> html) {
+ super.commonPreHead(html);
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return NodeBlock.class;
+ }
+
+ public static class NodeBlock extends HtmlBlock {
+
+ private final Context context;
+ private final ResourceView resourceView;
+
+ @Inject
+ public NodeBlock(Context context, ResourceView resourceView) {
+ this.context = context;
+ this.resourceView = resourceView;
+ }
+
+ @Override
+ protected void render(Block html) {
+ info("NodeManager information")
+ ._("Total Vmem allocated for Containers",
+ this.resourceView.getVmemAllocatedForContainers() + " bytes")
+ ._("Total Pmem allocated for Containers",
+ this.resourceView.getPmemAllocatedForContainers() + " bytes")
+ ._("NodeHealthyStatus",
+ this.context.getNodeHealthStatus().getIsNodeHealthy())
+ ._("LastNodeHealthTime", new Date(
+ this.context.getNodeHealthStatus().getLastHealthReportTime()))
+ ._("NodeHealthReport",
+ this.context.getNodeHealthStatus().getHealthReport())
+ ._("Node Manger Version:", YarnVersionInfo.getBuildVersion() +
+ " on " + YarnVersionInfo.getDate())
+ ._("Hadoop Version:", VersionInfo.getBuildVersion() +
+ " on " + VersionInfo.getDate());
+ html._(InfoBlock.class);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
new file mode 100644
index 0000000..cbf12e1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
@@ -0,0 +1,106 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.WebApps;
+
+public class WebServer extends AbstractService {
+
+ private static final Log LOG = LogFactory.getLog(WebServer.class);
+
+ private final Context nmContext;
+ private final ResourceView resourceView;
+ private WebApp webApp;
+
+ public WebServer(Context nmContext, ResourceView resourceView) {
+ super(WebServer.class.getName());
+ this.nmContext = nmContext;
+ this.resourceView = resourceView;
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+ super.init(conf);
+ }
+
+ @Override
+ public synchronized void start() {
+ String bindAddress = getConfig().get(NMConfig.NM_HTTP_BIND_ADDRESS,
+ NMConfig.DEFAULT_NM_HTTP_BIND_ADDRESS);
+ LOG.info("Instantiating NMWebApp at " + bindAddress);
+ try {
+ this.webApp =
+ WebApps.$for("yarn", Context.class, this.nmContext)
+ .at(bindAddress).with(getConfig())
+ .start(new NMWebApp(this.resourceView));
+ } catch (Exception e) {
+ String msg = "NMWebapps failed to start.";
+ LOG.error(msg, e);
+ throw new YarnException(msg);
+ }
+ super.start();
+ }
+
+ @Override
+ public synchronized void stop() {
+ if (this.webApp != null) {
+ this.webApp.stop();
+ }
+ super.stop();
+ }
+
+ public static class NMWebApp extends WebApp implements NMWebParams {
+
+ private final ResourceView resourceView;
+
+ public NMWebApp(ResourceView resourceView) {
+ this.resourceView = resourceView;
+ }
+
+ @Override
+ public void setup() {
+ bind(ResourceView.class).toInstance(this.resourceView);
+ route("/", NMController.class, "info");
+ route("/node", NMController.class, "node");
+ route("/allApplications", NMController.class, "allApplications");
+ route("/allContainers", NMController.class, "allContainers");
+ route(pajoin("/application", APPLICATION_ID), NMController.class,
+ "application");
+ route(pajoin("/container", CONTAINER_ID), NMController.class,
+ "container");
+ route(pajoin("/containerlogs", CONTAINER_ID, CONTAINER_LOG_TYPE),
+ NMController.class, "logs");
+ }
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/LocalizationProtocol.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/LocalizationProtocol.proto
new file mode 100644
index 0000000..3a2705c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/LocalizationProtocol.proto
@@ -0,0 +1,13 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "LocalizationProtocol";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_server_nodemanager_service_protos.proto";
+
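+// The localizer heartbeats the status of its resource fetches; the
+// response carries an action (LIVE to continue, DIE to exit) and,
+// possibly, further resources to fetch.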
+service LocalizationProtocolService {
+ rpc heartbeat(LocalizerStatusProto) returns (LocalizerHeartbeatResponseProto);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_service_protos.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_service_protos.proto
new file mode 100644
index 0000000..77dd3ab
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_service_protos.proto
@@ -0,0 +1,35 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "YarnServerNodemanagerServiceProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_protos.proto";
+
+enum ResourceStatusTypeProto {
+ FETCH_PENDING = 1;
+ FETCH_SUCCESS = 2;
+ FETCH_FAILURE = 3;
+}
+
+message LocalResourceStatusProto {
+ optional LocalResourceProto resource = 1;
+ optional ResourceStatusTypeProto status = 2;
+ optional URLProto localPath = 3;
+ optional int64 localSize = 4;
+ optional YarnRemoteExceptionProto exception = 5;
+}
+
+message LocalizerStatusProto {
+ optional string localizer_id = 1;
+ repeated LocalResourceStatusProto resources = 2;
+}
+
+enum LocalizerActionProto {
+ LIVE = 1;
+ DIE = 2;
+}
+
+message LocalizerHeartbeatResponseProto {
+ optional LocalizerActionProto action = 1;
+ repeated LocalResourceProto resources = 2;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
new file mode 100644
index 0000000..3cfe0f7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
@@ -0,0 +1 @@
+org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerSecurityInfo
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties
new file mode 100644
index 0000000..897bca3f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties
@@ -0,0 +1,31 @@
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=DEBUG,CLA
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# ContainerLog Appender
+#
+
+#Default values
+hadoop.yarn.mr.containerLogDir=null
+hadoop.yarn.mr.totalLogFileSize=100
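+
+# These placeholder defaults are meant to be overridden per container via
+# system properties, e.g. -Dhadoop.yarn.mr.containerLogDir=<log dir>.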
+
+log4j.appender.CLA=org.apache.hadoop.yarn.ContainerLogAppender
+log4j.appender.CLA.containerLogDir=${hadoop.yarn.mr.containerLogDir}
+log4j.appender.CLA.totalLogFileSize=${hadoop.yarn.mr.totalLogFileSize}
+
+log4j.appender.CLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.CLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
new file mode 100644
index 0000000..9ac6f2c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
@@ -0,0 +1,159 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import static org.junit.Assert.fail;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationInitedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceLocalizedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncher;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogAggregationService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorEvent;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+
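+/**
+ * A stub ContainerManagerImpl for tests: the localization, launcher
+ * and log-aggregation services are replaced with implementations that
+ * short-circuit their events, so container and application state
+ * machines can be driven without touching real resources.
+ */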
+public class DummyContainerManager extends ContainerManagerImpl {
+
+ private static final Log LOG = LogFactory
+ .getLog(DummyContainerManager.class);
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ public DummyContainerManager(Context context, ContainerExecutor exec,
+ DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,
+ NodeManagerMetrics metrics) {
+ super(context, exec, deletionContext, nodeStatusUpdater, metrics);
+ }
+
+ @Override
+ protected ResourceLocalizationService createResourceLocalizationService(ContainerExecutor exec,
+ DeletionService deletionContext) {
+ return new ResourceLocalizationService(super.dispatcher, exec, deletionContext) {
+ @Override
+ public void handle(LocalizationEvent event) {
+ switch (event.getType()) {
+ case INIT_APPLICATION_RESOURCES:
+ Application app =
+ ((ApplicationLocalizationEvent) event).getApplication();
+ // Simulate event from ApplicationLocalization.
+ dispatcher.getEventHandler().handle(new ApplicationInitedEvent(
+ app.getAppId()));
+ break;
+ case INIT_CONTAINER_RESOURCES:
+ ContainerLocalizationRequestEvent rsrcReqs =
+ (ContainerLocalizationRequestEvent) event;
+ // simulate localization of all requested resources
+ for (LocalResourceRequest req : rsrcReqs.getRequestedResources()) {
+            LOG.debug("Simulating localization of " + req + " for "
+                + rsrcReqs.getContainer().getContainerID());
+ dispatcher.getEventHandler().handle(
+ new ContainerResourceLocalizedEvent(
+ rsrcReqs.getContainer().getContainerID(), req,
+ new Path("file:///local" + req.getPath().toUri().getPath())));
+ }
+ break;
+ case CLEANUP_CONTAINER_RESOURCES:
+ Container container =
+ ((ContainerLocalizationEvent) event).getContainer();
+ // TODO: delete the container dir
+ this.dispatcher.getEventHandler().handle(
+ new ContainerEvent(container.getContainerID(),
+ ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
+ break;
+ case DESTROY_APPLICATION_RESOURCES:
+ Application application =
+ ((ApplicationLocalizationEvent) event).getApplication();
+
+ // decrement reference counts of all resources associated with this
+ // app
+ this.dispatcher.getEventHandler().handle(
+ new ApplicationEvent(application.getAppId(),
+ ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP));
+ break;
+ default:
+ fail("Unexpected event: " + event.getType());
+ }
+ }
+ };
+ }
+
+ @Override
+ protected ContainersLauncher createContainersLauncher(Context context,
+ ContainerExecutor exec) {
+ return new ContainersLauncher(context, super.dispatcher, exec) {
+ @Override
+ public void handle(ContainersLauncherEvent event) {
+ Container container = event.getContainer();
+ ContainerId containerId = container.getContainerID();
+ switch (event.getType()) {
+ case LAUNCH_CONTAINER:
+ dispatcher.getEventHandler().handle(
+ new ContainerEvent(containerId,
+ ContainerEventType.CONTAINER_LAUNCHED));
+ break;
+ case CLEANUP_CONTAINER:
+ dispatcher.getEventHandler().handle(
+ new ContainerExitEvent(containerId,
+ ContainerEventType.CONTAINER_KILLED_ON_REQUEST, 0));
+ break;
+ }
+ }
+ };
+ }
+
+ @Override
+ protected LogAggregationService createLogAggregationService(
+ DeletionService deletionService) {
+ return new LogAggregationService(deletionService) {
+ @Override
+ public void handle(LogAggregatorEvent event) {
+ switch (event.getType()) {
+ case APPLICATION_STARTED:
+ break;
+ case CONTAINER_FINISHED:
+ break;
+ case APPLICATION_FINISHED:
+ break;
+ default:
+ // Ignore
+ }
+ }
+ };
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/LocalRMInterface.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/LocalRMInterface.java
new file mode 100644
index 0000000..a16f723
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/LocalRMInterface.java
@@ -0,0 +1,53 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
+
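+/**
+ * A minimal in-process ResourceTracker: it acknowledges node
+ * registration and heartbeats with empty responses, so NodeManager
+ * tests can run without a live ResourceManager.
+ */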
+public class LocalRMInterface implements ResourceTracker {
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ @Override
+ public RegisterNodeManagerResponse registerNodeManager(RegisterNodeManagerRequest request) throws YarnRemoteException {
+ RegistrationResponse registrationResponse = recordFactory.newRecordInstance(RegistrationResponse.class);
+ RegisterNodeManagerResponse response = recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
+ response.setRegistrationResponse(registrationResponse);
+ return response;
+ }
+
+ @Override
+ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnRemoteException {
+ NodeHeartbeatResponse response = recordFactory.newRecordInstance(NodeHeartbeatResponse.class);
+ return response;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
new file mode 100644
index 0000000..54ee1f4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
@@ -0,0 +1,140 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
+import org.junit.After;
+
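+/**
+ * Re-runs the TestContainerManager suite with a LinuxContainerExecutor.
+ * Every test is skipped unless the path to the native container
+ * executor binary is supplied through the system property named by
+ * LinuxContainerExecutor.CONTAINER_EXECUTOR_EXEC_KEY.
+ */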
+public class TestContainerManagerWithLCE extends TestContainerManager {
+
+ private static final Log LOG = LogFactory
+ .getLog(TestContainerManagerWithLCE.class);
+
+ public TestContainerManagerWithLCE() throws UnsupportedFileSystemException {
+ super();
+ }
+
+ static {
+ localDir =
+ new File("target",
+ TestContainerManagerWithLCE.class.getName() + "-localDir")
+ .getAbsoluteFile();
+ tmpDir = new File("target",
+ TestContainerManagerWithLCE.class.getName() + "-tmpDir");
+ }
+
+ @Override
+ public void setup() throws IOException {
+ // Don't run the test if the binary is not available.
+ if (!shouldRunTest()) {
+ LOG.info("LCE binary path is not passed. Not running the test");
+ return;
+ }
+ super.setup();
+ localFS.setPermission(new Path(localDir.getCanonicalPath()),
+ new FsPermission(
+ (short) 0777));
+ localFS.setPermission(new Path(tmpDir.getCanonicalPath()),
+ new FsPermission(
+ (short) 0777));
+ }
+
+ @After
+ @Override
+ public void tearDown() throws IOException, InterruptedException {
+ if (shouldRunTest()) {
+ super.tearDown();
+ }
+ }
+
+ @Override
+ public void testContainerSetup() throws IOException, InterruptedException {
+ // Don't run the test if the binary is not available.
+ if (!shouldRunTest()) {
+ LOG.info("LCE binary path is not passed. Not running the test");
+ return;
+ }
+ LOG.info("Running testContainerSetup");
+ super.testContainerSetup();
+ }
+
+ @Override
+ public void testContainerManagerInitialization() throws IOException {
+ // Don't run the test if the binary is not available.
+ if (!shouldRunTest()) {
+ LOG.info("LCE binary path is not passed. Not running the test");
+ return;
+ }
+ LOG.info("Running testContainerManagerInitialization");
+ super.testContainerManagerInitialization();
+ }
+
+ @Override
+ public void testContainerLaunchAndStop() throws IOException,
+ InterruptedException {
+ // Don't run the test if the binary is not available.
+ if (!shouldRunTest()) {
+ LOG.info("LCE binary path is not passed. Not running the test");
+ return;
+ }
+ LOG.info("Running testContainerLaunchAndStop");
+ super.testContainerLaunchAndStop();
+ }
+
+ @Override
+ public void testLocalFilesCleanup() throws InterruptedException,
+ IOException {
+ // Don't run the test if the binary is not available.
+ if (!shouldRunTest()) {
+ LOG.info("LCE binary path is not passed. Not running the test");
+ return;
+ }
+ LOG.info("Running testLocalFilesCleanup");
+ super.testLocalFilesCleanup();
+ }
+
+ private boolean shouldRunTest() {
+ return System
+ .getProperty(LinuxContainerExecutor.CONTAINER_EXECUTOR_EXEC_KEY) != null;
+ }
+
+ @Override
+ protected ContainerExecutor createContainerExecutor() {
+ super.conf.set(LinuxContainerExecutor.CONTAINER_EXECUTOR_EXEC_KEY, System
+ .getProperty(LinuxContainerExecutor.CONTAINER_EXECUTOR_EXEC_KEY));
+ LinuxContainerExecutor linuxContainerExecutor =
+ new LinuxContainerExecutor();
+ linuxContainerExecutor.setConf(super.conf);
+ return linuxContainerExecutor;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDefaultContainerExecutor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDefaultContainerExecutor.java
new file mode 100644
index 0000000..555f7ac
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDefaultContainerExecutor.java
@@ -0,0 +1,223 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.InputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Options.CreateOpts;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.FakeFSDataInputStream;
+
+import static org.apache.hadoop.fs.CreateFlag.*;
+
+
+import org.junit.AfterClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
+import org.mockito.ArgumentMatcher;
+import org.mockito.Matchers;
+import static org.mockito.Mockito.*;
+
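+/**
+ * Placeholder suite for the DefaultContainerExecutor: the init test is
+ * checked in commented out because FileContext cannot reasonably be
+ * mocked, so only the temp-file helpers below are live.
+ */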
+public class TestDefaultContainerExecutor {
+
+ /*
+ // XXX FileContext cannot be mocked to do this
+ static FSDataInputStream getRandomStream(Random r, int len)
+ throws IOException {
+ byte[] bytes = new byte[len];
+ r.nextBytes(bytes);
+ DataInputBuffer buf = new DataInputBuffer();
+ buf.reset(bytes, 0, bytes.length);
+ return new FSDataInputStream(new FakeFSDataInputStream(buf));
+ }
+
+ class PathEndsWith extends ArgumentMatcher<Path> {
+ final String suffix;
+ PathEndsWith(String suffix) {
+ this.suffix = suffix;
+ }
+ @Override
+ public boolean matches(Object o) {
+ return
+ suffix.equals(((Path)o).getName());
+ }
+ }
+
+ DataOutputBuffer mockStream(
+ AbstractFileSystem spylfs, Path p, Random r, int len)
+ throws IOException {
+ DataOutputBuffer dob = new DataOutputBuffer();
+ doReturn(getRandomStream(r, len)).when(spylfs).open(p);
+ doReturn(new FileStatus(len, false, -1, -1L, -1L, p)).when(
+ spylfs).getFileStatus(argThat(new PathEndsWith(p.getName())));
+ doReturn(new FSDataOutputStream(dob)).when(spylfs).createInternal(
+ argThat(new PathEndsWith(p.getName())),
+ eq(EnumSet.of(OVERWRITE)),
+ Matchers.<FsPermission>anyObject(), anyInt(), anyShort(), anyLong(),
+ Matchers.<Progressable>anyObject(), anyInt(), anyBoolean());
+ return dob;
+ }
+ */
+
+ @AfterClass
+ public static void deleteTmpFiles() throws IOException {
+ FileContext lfs = FileContext.getLocalFSFileContext();
+ lfs.delete(new Path("target",
+ TestDefaultContainerExecutor.class.getSimpleName()), true);
+ }
+
+ byte[] createTmpFile(Path dst, Random r, int len)
+ throws IOException {
+ // use unmodified local context
+ FileContext lfs = FileContext.getLocalFSFileContext();
+ dst = lfs.makeQualified(dst);
+ lfs.mkdir(dst.getParent(), null, true);
+ byte[] bytes = new byte[len];
+ FSDataOutputStream out = null;
+ try {
+ out = lfs.create(dst, EnumSet.of(CREATE, OVERWRITE));
+ r.nextBytes(bytes);
+ out.write(bytes);
+ } finally {
+ if (out != null) out.close();
+ }
+ return bytes;
+ }
+
+// @Test
+// public void testInit() throws IOException, InterruptedException {
+// Configuration conf = new Configuration();
+// AbstractFileSystem spylfs =
+// spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
+// // don't actually create dirs
+// //doNothing().when(spylfs).mkdir(Matchers.<Path>anyObject(),
+// // Matchers.<FsPermission>anyObject(), anyBoolean());
+// FileContext lfs = FileContext.getFileContext(spylfs, conf);
+//
+// Path basedir = new Path("target",
+// TestDefaultContainerExecutor.class.getSimpleName());
+// List<String> localDirs = new ArrayList<String>();
+// List<Path> localPaths = new ArrayList<Path>();
+// for (int i = 0; i < 4; ++i) {
+// Path p = new Path(basedir, i + "");
+// lfs.mkdir(p, null, true);
+// localPaths.add(p);
+// localDirs.add(p.toString());
+// }
+// final String user = "yak";
+// final String appId = "app_RM_0";
+// final Path logDir = new Path(basedir, "logs");
+// final Path nmLocal = new Path(basedir, "nmPrivate/" + user + "/" + appId);
+// final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 4344);
+// System.out.println("NMLOCAL: " + nmLocal);
+// Random r = new Random();
+//
+// /*
+// // XXX FileContext cannot be reasonably mocked to do this
+// // mock jobFiles copy
+// long fileSeed = r.nextLong();
+// r.setSeed(fileSeed);
+// System.out.println("SEED: " + seed);
+// Path fileCachePath = new Path(nmLocal, ApplicationLocalizer.FILECACHE_FILE);
+// DataOutputBuffer fileCacheBytes = mockStream(spylfs, fileCachePath, r, 512);
+//
+// // mock jobTokens copy
+// long jobSeed = r.nextLong();
+// r.setSeed(jobSeed);
+// System.out.println("SEED: " + seed);
+// Path jobTokenPath = new Path(nmLocal, ApplicationLocalizer.JOBTOKEN_FILE);
+// DataOutputBuffer jobTokenBytes = mockStream(spylfs, jobTokenPath, r, 512);
+// */
+//
+// // create jobFiles
+// long fileSeed = r.nextLong();
+// r.setSeed(fileSeed);
+// System.out.println("SEED: " + fileSeed);
+// Path fileCachePath = new Path(nmLocal, ApplicationLocalizer.FILECACHE_FILE);
+// byte[] fileCacheBytes = createTmpFile(fileCachePath, r, 512);
+//
+// // create jobTokens
+// long jobSeed = r.nextLong();
+// r.setSeed(jobSeed);
+// System.out.println("SEED: " + jobSeed);
+// Path jobTokenPath = new Path(nmLocal, ApplicationLocalizer.JOBTOKEN_FILE);
+// byte[] jobTokenBytes = createTmpFile(jobTokenPath, r, 512);
+//
+// DefaultContainerExecutor dce = new DefaultContainerExecutor(lfs);
+// Localization mockLocalization = mock(Localization.class);
+// ApplicationLocalizer spyLocalizer =
+// spy(new ApplicationLocalizer(lfs, user, appId, logDir,
+// localPaths));
+// // ignore cache localization
+// doNothing().when(spyLocalizer).localizeFiles(
+// Matchers.<Localization>anyObject(), Matchers.<Path>anyObject());
+// Path workingDir = lfs.getWorkingDirectory();
+// dce.initApplication(spyLocalizer, nmLocal, mockLocalization, localPaths);
+// lfs.setWorkingDirectory(workingDir);
+//
+// for (Path localdir : localPaths) {
+// Path userdir = lfs.makeQualified(new Path(localdir,
+// new Path(ApplicationLocalizer.USERCACHE, user)));
+// // $localdir/$user
+// verify(spylfs).mkdir(userdir,
+// new FsPermission(ApplicationLocalizer.USER_PERM), true);
+// // $localdir/$user/appcache
+// Path jobdir = new Path(userdir, ApplicationLocalizer.appcache);
+// verify(spylfs).mkdir(jobdir,
+// new FsPermission(ApplicationLocalizer.appcache_PERM), true);
+// // $localdir/$user/filecache
+// Path filedir = new Path(userdir, ApplicationLocalizer.FILECACHE);
+// verify(spylfs).mkdir(filedir,
+// new FsPermission(ApplicationLocalizer.FILECACHE_PERM), true);
+// // $localdir/$user/appcache/$appId
+// Path appdir = new Path(jobdir, appId);
+// verify(spylfs).mkdir(appdir,
+// new FsPermission(ApplicationLocalizer.APPDIR_PERM), true);
+// // $localdir/$user/appcache/$appId/work
+// Path workdir = new Path(appdir, ApplicationLocalizer.WORKDIR);
+// verify(spylfs, atMost(1)).mkdir(workdir, FsPermission.getDefault(), true);
+// }
+// // $logdir/$appId
+// Path logdir = new Path(lfs.makeQualified(logDir), appId);
+// verify(spylfs).mkdir(logdir,
+// new FsPermission(ApplicationLocalizer.LOGDIR_PERM), true);
+// }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
new file mode 100644
index 0000000..abaad22
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
@@ -0,0 +1,160 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+
+
+import org.junit.AfterClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestDeletionService {
+
+ private static final FileContext lfs = getLfs();
+  private static FileContext getLfs() {
+ try {
+ return FileContext.getLocalFSFileContext();
+ } catch (UnsupportedFileSystemException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ private static final Path base =
+ lfs.makeQualified(new Path("target", TestDeletionService.class.getName()));
+
+ @AfterClass
+ public static void removeBase() throws IOException {
+ lfs.delete(base, true);
+ }
+
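+  /**
+   * Builds numpaths paths of random name and depth under root; a path
+   * gains a further random component whenever the next long drawn is
+   * even.
+   */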
+ public List<Path> buildDirs(Random r, Path root, int numpaths)
+ throws IOException {
+ ArrayList<Path> ret = new ArrayList<Path>();
+ for (int i = 0; i < numpaths; ++i) {
+ Path p = root;
+ long name = r.nextLong();
+ do {
+ p = new Path(p, "" + name);
+ name = r.nextLong();
+ } while (0 == (name % 2));
+ ret.add(p);
+ }
+ return ret;
+ }
+
+ public void createDirs(Path base, List<Path> dirs) throws IOException {
+ for (Path dir : dirs) {
+ lfs.mkdir(new Path(base, dir), null, true);
+ }
+ }
+
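+  /**
+   * Verifies the user passed for each deletion (even-named dirs are
+   * deleted as the NM with a null user, odd-named ones as "dingo")
+   * and asserts the directory is gone afterwards.
+   */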
+ static class FakeDefaultContainerExecutor extends DefaultContainerExecutor {
+ @Override
+ public void deleteAsUser(String user, Path subDir, Path... basedirs)
+ throws IOException, InterruptedException {
+ if ((Long.parseLong(subDir.getName()) % 2) == 0) {
+ assertNull(user);
+ } else {
+ assertEquals("dingo", user);
+ }
+ super.deleteAsUser(user, subDir, basedirs);
+ assertFalse(lfs.util().exists(subDir));
+ }
+ }
+
+ @Test
+ public void testAbsDelete() throws Exception {
+ Random r = new Random();
+ long seed = r.nextLong();
+ r.setSeed(seed);
+ System.out.println("SEED: " + seed);
+ List<Path> dirs = buildDirs(r, base, 20);
+ createDirs(new Path("."), dirs);
+ FakeDefaultContainerExecutor exec = new FakeDefaultContainerExecutor();
+ Configuration conf = new Configuration();
+ exec.setConf(conf);
+ DeletionService del = new DeletionService(exec);
+ del.init(conf);
+ del.start();
+ try {
+ for (Path p : dirs) {
+ del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
+ p, null);
+ }
+ } finally {
+ del.stop();
+ }
+ for (Path p : dirs) {
+ assertFalse(lfs.util().exists(p));
+ }
+ }
+
+ @Test
+ public void testRelativeDelete() throws Exception {
+ Random r = new Random();
+ long seed = r.nextLong();
+ r.setSeed(seed);
+ System.out.println("SEED: " + seed);
+ List<Path> baseDirs = buildDirs(r, base, 4);
+ createDirs(new Path("."), baseDirs);
+ List<Path> content = buildDirs(r, new Path("."), 10);
+ for (Path b : baseDirs) {
+ createDirs(b, content);
+ }
+ DeletionService del =
+ new DeletionService(new FakeDefaultContainerExecutor());
+ del.init(new Configuration());
+ del.start();
+ try {
+ for (Path p : content) {
+ assertTrue(lfs.util().exists(new Path(baseDirs.get(0), p)));
+ del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
+ p, baseDirs.toArray(new Path[4]));
+ }
+ } finally {
+ del.stop();
+ }
+ for (Path p : baseDirs) {
+ for (Path q : content) {
+ assertFalse(lfs.util().exists(new Path(p, q)));
+ }
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
new file mode 100644
index 0000000..b962da6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -0,0 +1,130 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.NodeHealthCheckerService;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.junit.Test;
+
+public class TestEventFlow {
+
+ private static final Log LOG = LogFactory.getLog(TestEventFlow.class);
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ private static File localDir = new File("target",
+ TestEventFlow.class.getName() + "-localDir").getAbsoluteFile();
+ private static File localLogDir = new File("target",
+ TestEventFlow.class.getName() + "-localLogDir").getAbsoluteFile();
+ private static File remoteLogDir = new File("target",
+ TestEventFlow.class.getName() + "-remoteLogDir").getAbsoluteFile();
+
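+  /**
+   * Drives one container through its life cycle against the stubbed
+   * services: start it, wait for RUNNING, stop it, wait for COMPLETE,
+   * then shut the container manager down.
+   */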
+ @Test
+ public void testSuccessfulContainerLaunch() throws InterruptedException,
+ IOException {
+
+ FileContext localFS = FileContext.getLocalFSFileContext();
+
+ localFS.delete(new Path(localDir.getAbsolutePath()), true);
+ localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
+ localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
+ localDir.mkdir();
+ localLogDir.mkdir();
+ remoteLogDir.mkdir();
+
+ Context context = new NMContext();
+
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.set(NMConfig.NM_LOCAL_DIR, localDir.getAbsolutePath());
+ conf.set(NMConfig.NM_LOG_DIR, localLogDir.getAbsolutePath());
+ conf.set(NMConfig.REMOTE_USER_LOG_DIR, remoteLogDir.getAbsolutePath());
+
+ ContainerExecutor exec = new DefaultContainerExecutor();
+ exec.setConf(conf);
+ DeletionService del = new DeletionService(exec);
+ Dispatcher dispatcher = new AsyncDispatcher();
+ NodeHealthCheckerService healthChecker = null;
+ NodeManagerMetrics metrics = NodeManagerMetrics.create();
+ NodeStatusUpdater nodeStatusUpdater =
+ new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics) {
+ @Override
+ protected ResourceTracker getRMClient() {
+ return new LocalRMInterface();
+      }
+
+ @Override
+ protected void startStatusUpdater() {
+ return; // Don't start any updating thread.
+ }
+ };
+
+ DummyContainerManager containerManager =
+ new DummyContainerManager(context, exec, del, nodeStatusUpdater, metrics);
+ containerManager.init(conf);
+ containerManager.start();
+
+ ContainerLaunchContext launchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ ContainerId cID = recordFactory.newRecordInstance(ContainerId.class);
+ cID.setAppId(recordFactory.newRecordInstance(ApplicationId.class));
+ launchContext.setContainerId(cID);
+ launchContext.setUser("testing");
+ launchContext.setResource(recordFactory.newRecordInstance(Resource.class));
+ StartContainerRequest request = recordFactory.newRecordInstance(StartContainerRequest.class);
+ request.setContainerLaunchContext(launchContext);
+ containerManager.startContainer(request);
+
+ BaseContainerManagerTest.waitForContainerState(containerManager, cID,
+ ContainerState.RUNNING);
+
+ StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
+ stopRequest.setContainerId(cID);
+ containerManager.stopContainer(stopRequest);
+ BaseContainerManagerTest.waitForContainerState(containerManager, cID,
+ ContainerState.COMPLETE);
+
+ containerManager.stop();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
new file mode 100644
index 0000000..aa76a7e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
@@ -0,0 +1,173 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.security.AccessControlException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
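+/**
+ * Disabled suite for the LinuxContainerExecutor: the launch and kill
+ * tests below are checked in commented out, and when enabled they
+ * expect the native binary via the container-executor-path property.
+ */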
+public class TestLinuxContainerExecutor {
+//
+// private static final Log LOG = LogFactory
+// .getLog(TestLinuxContainerExecutor.class);
+//
+// // TODO: FIXME
+// private static File workSpace = new File("target",
+// TestLinuxContainerExecutor.class.getName() + "-workSpace");
+//
+// @Before
+// public void setup() throws IOException {
+// FileContext.getLocalFSFileContext().mkdir(
+// new Path(workSpace.getAbsolutePath()), null, true);
+// workSpace.setReadable(true, false);
+// workSpace.setExecutable(true, false);
+// workSpace.setWritable(true, false);
+// }
+//
+// @After
+// public void tearDown() throws AccessControlException, FileNotFoundException,
+// UnsupportedFileSystemException, IOException {
+// FileContext.getLocalFSFileContext().delete(
+// new Path(workSpace.getAbsolutePath()), true);
+// }
+//
+ @Test
+ public void testCommandFilePreparation() throws IOException {
+// LinuxContainerExecutor executor = new LinuxContainerExecutor(new String[] {
+// "/bin/echo", "hello" }, null, null, "nobody"); // TODO: fix user name
+// executor.prepareCommandFile(workSpace.getAbsolutePath());
+//
+// // Now verify the contents of the commandFile
+// File commandFile = new File(workSpace, LinuxContainerExecutor.COMMAND_FILE);
+// BufferedReader reader = new BufferedReader(new FileReader(commandFile));
+// Assert.assertEquals("/bin/echo hello", reader.readLine());
+// Assert.assertEquals(null, reader.readLine());
+// Assert.assertTrue(commandFile.canExecute());
+ }
+//
+// @Test
+// public void testContainerLaunch() throws IOException {
+// String containerExecutorPath = System
+// .getProperty("container-executor-path");
+// if (containerExecutorPath == null || containerExecutorPath.equals("")) {
+// LOG.info("Not Running test for lack of container-executor-path");
+// return;
+// }
+//
+// String applicationSubmitter = "nobody";
+//
+// File touchFile = new File(workSpace, "touch-file");
+// LinuxContainerExecutor executor = new LinuxContainerExecutor(new String[] {
+// "touch", touchFile.getAbsolutePath() }, workSpace, null,
+// applicationSubmitter);
+// executor.setCommandExecutorPath(containerExecutorPath);
+// executor.execute();
+//
+// FileStatus fileStatus = FileContext.getLocalFSFileContext().getFileStatus(
+// new Path(touchFile.getAbsolutePath()));
+// Assert.assertEquals(applicationSubmitter, fileStatus.getOwner());
+// }
+//
+// @Test
+// public void testContainerKill() throws IOException, InterruptedException,
+// IllegalArgumentException, SecurityException, IllegalAccessException,
+// NoSuchFieldException {
+// String containerExecutorPath = System
+// .getProperty("container-executor-path");
+// if (containerExecutorPath == null || containerExecutorPath.equals("")) {
+// LOG.info("Not Running test for lack of container-executor-path");
+// return;
+// }
+//
+// String applicationSubmitter = "nobody";
+// final LinuxContainerExecutor executor = new LinuxContainerExecutor(
+// new String[] { "sleep", "100" }, workSpace, null, applicationSubmitter);
+// executor.setCommandExecutorPath(containerExecutorPath);
+// new Thread() {
+// public void run() {
+// try {
+// executor.execute();
+// } catch (IOException e) {
+// // TODO Auto-generated catch block
+// e.printStackTrace();
+// }
+// };
+// }.start();
+//
+// String pid;
+// while ((pid = executor.getPid()) == null) {
+// LOG.info("Sleeping for 5 seconds before checking if "
+// + "the process is alive.");
+// Thread.sleep(5000);
+// }
+//    LOG.info("Going to check the liveness of the process with pid " + pid);
+//
+// LinuxContainerExecutor checkLiveliness = new LinuxContainerExecutor(
+// new String[] { "kill", "-0", "-" + pid }, workSpace, null,
+// applicationSubmitter);
+// checkLiveliness.setCommandExecutorPath(containerExecutorPath);
+// checkLiveliness.execute();
+//
+// LOG.info("Process is alive. "
+// + "Sleeping for 5 seconds before killing the process.");
+// Thread.sleep(5000);
+//    LOG.info("Going to kill the process.");
+//
+// executor.kill();
+//
+// LOG.info("Sleeping for 5 seconds before checking if "
+// + "the process is alive.");
+// Thread.sleep(5000);
+//    LOG.info("Going to check the liveness of the process.");
+//
+// // TODO: fix
+// checkLiveliness = new LinuxContainerExecutor(new String[] { "kill", "-0",
+// "-" + pid }, workSpace, null, applicationSubmitter);
+// checkLiveliness.setCommandExecutorPath(containerExecutorPath);
+// boolean success = false;
+// try {
+// checkLiveliness.execute();
+// success = true;
+// } catch (IOException e) {
+// success = false;
+// }
+//
+// Assert.assertFalse(success);
+// }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
new file mode 100644
index 0000000..d208b2f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -0,0 +1,250 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.NodeHealthCheckerService;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.service.Service.STATE;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestNodeStatusUpdater {
+
+ static final Log LOG = LogFactory.getLog(TestNodeStatusUpdater.class);
+ static final Path basedir =
+ new Path("target", TestNodeStatusUpdater.class.getName());
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ int heartBeatID = 0;
+ volatile Error nmStartError = null;
+
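+  /**
+   * A scripted ResourceTracker: it verifies the registration request,
+   * hands the NM one container on the first heartbeat and a second on
+   * the next, and checks the container statuses reported back on each
+   * heartbeat.
+   */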
+ private class MyResourceTracker implements ResourceTracker {
+
+ private Context context;
+
+ public MyResourceTracker(Context context) {
+ this.context = context;
+ }
+
+ @Override
+ public RegisterNodeManagerResponse registerNodeManager(RegisterNodeManagerRequest request) throws YarnRemoteException {
+ NodeId nodeId = request.getNodeId();
+ Resource resource = request.getResource();
+ LOG.info("Registering " + nodeId.toString());
+ try {
+ Assert.assertEquals(InetAddress.getLocalHost().getHostAddress()
+ + ":12345", nodeId.toString());
+ } catch (UnknownHostException e) {
+ Assert.fail(e.getMessage());
+ }
+ Assert.assertEquals(5 * 1024, resource.getMemory());
+ RegistrationResponse regResponse = recordFactory.newRecordInstance(RegistrationResponse.class);
+
+ RegisterNodeManagerResponse response = recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
+ response.setRegistrationResponse(regResponse);
+ return response;
+ }
+
+ ApplicationId applicationID = recordFactory.newRecordInstance(ApplicationId.class);
+ ContainerId firstContainerID = recordFactory.newRecordInstance(ContainerId.class);
+ ContainerId secondContainerID = recordFactory.newRecordInstance(ContainerId.class);
+
+ @Override
+ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnRemoteException {
+ NodeStatus nodeStatus = request.getNodeStatus();
+ LOG.info("Got heartbeat number " + heartBeatID);
+ nodeStatus.setResponseId(heartBeatID++);
+ if (heartBeatID == 1) {
+ Assert.assertEquals(0, nodeStatus.getAllContainers().size());
+
+ // Give a container to the NM.
+ applicationID.setId(heartBeatID);
+ firstContainerID.setAppId(applicationID);
+ firstContainerID.setId(heartBeatID);
+ ContainerLaunchContext launchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ launchContext.setContainerId(firstContainerID);
+ launchContext.setResource(recordFactory.newRecordInstance(Resource.class));
+ launchContext.getResource().setMemory(2);
+ Container container = new ContainerImpl(null, launchContext, null, null);
+ this.context.getContainers().put(firstContainerID, container);
+ } else if (heartBeatID == 2) {
+ // Checks on the RM end
+ Assert.assertEquals("Number of applications should only be one!", 1,
+ nodeStatus.getAllContainers().size());
+ Assert.assertEquals("Number of container for the app should be one!",
+ 1, nodeStatus.getContainers(applicationID).size());
+ Assert.assertEquals(2, nodeStatus.getContainers(applicationID).get(0)
+ .getResource().getMemory());
+
+ // Checks on the NM end
+ ConcurrentMap<ContainerId, Container> activeContainers =
+ this.context.getContainers();
+ Assert.assertEquals(1, activeContainers.size());
+
+ // Give another container to the NM.
+ applicationID.setId(heartBeatID);
+ secondContainerID.setAppId(applicationID);
+ secondContainerID.setId(heartBeatID);
+ ContainerLaunchContext launchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ launchContext.setContainerId(secondContainerID);
+ launchContext.setResource(recordFactory.newRecordInstance(Resource.class));
+ launchContext.getResource().setMemory(3);
+ Container container = new ContainerImpl(null, launchContext, null, null);
+ this.context.getContainers().put(secondContainerID, container);
+ } else if (heartBeatID == 3) {
+ // Checks on the RM end
+ Assert.assertEquals("Number of applications should only be one!", 1,
+ nodeStatus.getAllContainers().size());
+ Assert.assertEquals("Number of container for the app should be two!",
+ 2, nodeStatus.getContainers(applicationID).size());
+ Assert.assertEquals(2, nodeStatus.getContainers(applicationID).get(0)
+ .getResource().getMemory());
+ Assert.assertEquals(3, nodeStatus.getContainers(applicationID).get(1)
+ .getResource().getMemory());
+
+ // Checks on the NM end
+ ConcurrentMap<ContainerId, Container> activeContainers =
+ this.context.getContainers();
+ Assert.assertEquals(2, activeContainers.size());
+ }
+ HeartbeatResponse response = recordFactory.newRecordInstance(HeartbeatResponse.class);
+ response.setResponseId(heartBeatID);
+
+ NodeHeartbeatResponse nhResponse = recordFactory.newRecordInstance(NodeHeartbeatResponse.class);
+ nhResponse.setHeartbeatResponse(response);
+ return nhResponse;
+ }
+ }
+
+ private class MyNodeStatusUpdater extends NodeStatusUpdaterImpl {
+ private Context context;
+
+ public MyNodeStatusUpdater(Context context, Dispatcher dispatcher,
+ NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
+ super(context, dispatcher, healthChecker, metrics);
+ this.context = context;
+ }
+
+ @Override
+ protected ResourceTracker getRMClient() {
+ return new MyResourceTracker(this.context);
+ }
+ }
+
+ @Before
+ public void clearError() {
+ nmStartError = null;
+ }
+
+ @After
+ public void deleteBaseDir() throws IOException {
+ FileContext lfs = FileContext.getLocalFSFileContext();
+ lfs.delete(basedir, true);
+ }
+
+ @Test
+ public void testNMRegistration() throws InterruptedException {
+ final NodeManager nm = new NodeManager() {
+ @Override
+ protected NodeStatusUpdater createNodeStatusUpdater(Context context,
+ Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
+ return new MyNodeStatusUpdater(context, dispatcher, healthChecker,
+ metrics);
+ }
+ };
+
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.setInt(NMConfig.NM_VMEM_GB, 5); // 5GB
+ conf.set(NMConfig.NM_BIND_ADDRESS, "127.0.0.1:12345");
+ conf.set(NMConfig.NM_LOCALIZER_BIND_ADDRESS, "127.0.0.1:12346");
+ conf.set(NMConfig.NM_LOG_DIR, new Path(basedir, "logs").toUri().getPath());
+ conf.set(NMConfig.REMOTE_USER_LOG_DIR, new Path(basedir, "remotelogs")
+ .toUri().getPath());
+ conf.set(NMConfig.NM_LOCAL_DIR, new Path(basedir, "nm0").toUri().getPath());
+ nm.init(conf);
+ new Thread() {
+ public void run() {
+ try {
+ nm.start();
+ } catch (Error e) {
+ TestNodeStatusUpdater.this.nmStartError = e;
+ }
+ }
+ }.start();
+
+    LOG.info("NM start thread launched; current service state: "
+        + nm.getServiceState());
+
+ int waitCount = 0;
+ while (nm.getServiceState() == STATE.INITED && waitCount++ != 20) {
+ LOG.info("Waiting for NM to start..");
+ Thread.sleep(1000);
+ }
+ if (nmStartError != null) {
+ throw nmStartError;
+ }
+ if (nm.getServiceState() != STATE.STARTED) {
+ // NM could have failed.
+ Assert.fail("NodeManager failed to start");
+ }
+
+ while (heartBeatID <= 3) {
+ Thread.sleep(500);
+ }
+
+ nm.stop();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestRPCFactories.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestRPCFactories.java
new file mode 100644
index 0000000..fdb1007
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestRPCFactories.java
@@ -0,0 +1,108 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
+import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.junit.Test;
+
+public class TestRPCFactories {
+
+ @Test
+ public void test() {
+ testPbServerFactory();
+
+ testPbClientFactory();
+ }
+
+ private void testPbServerFactory() {
+ InetSocketAddress addr = new InetSocketAddress(0);
+ Configuration conf = new Configuration();
+ LocalizationProtocol instance = new LocalizationProtocolTestImpl();
+ Server server = null;
+ try {
+ server =
+ RpcServerFactoryPBImpl.get().getServer(
+ LocalizationProtocol.class, instance, addr, conf, null, 1);
+ server.start();
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to create server");
+ } finally {
+ if (server != null) {
+ server.stop();
+ }
+ }
+ }
+
+ private void testPbClientFactory() {
+ InetSocketAddress addr = new InetSocketAddress(0);
+    System.err.println(addr.getHostName() + ":" + addr.getPort());
+ Configuration conf = new Configuration();
+ LocalizationProtocol instance = new LocalizationProtocolTestImpl();
+ Server server = null;
+ try {
+ server =
+ RpcServerFactoryPBImpl.get().getServer(
+ LocalizationProtocol.class, instance, addr, conf, null, 1);
+ server.start();
+ System.err.println(server.getListenerAddress());
+ System.err.println(NetUtils.getConnectAddress(server));
+
+ try {
+ LocalizationProtocol client = (LocalizationProtocol)
+ RpcClientFactoryPBImpl.get().getClient(
+ LocalizationProtocol.class, 1,
+ NetUtils.getConnectAddress(server), conf);
+ Assert.assertNotNull(client);
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to create client");
+ }
+
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to create server");
+    } finally {
+      if (server != null) {
+        server.stop();
+      }
+    }
+ }
+
+ public class LocalizationProtocolTestImpl implements LocalizationProtocol {
+
+ @Override
+ public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status) {
+ return null;
+ }
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestRecordFactory.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestRecordFactory.java
new file mode 100644
index 0000000..157134c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestRecordFactory.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerHeartbeatResponsePBImpl;
+
+import org.junit.Test;
+import junit.framework.Assert;
+
+public class TestRecordFactory {
+
+ @Test
+ public void testPbRecordFactory() {
+ RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
+
+ try {
+ LocalizerHeartbeatResponse response = pbRecordFactory.newRecordInstance(
+ LocalizerHeartbeatResponse.class);
+ Assert.assertEquals(LocalizerHeartbeatResponsePBImpl.class,
+ response.getClass());
+ } catch (YarnException e) {
+ e.printStackTrace();
+ Assert.fail("Failed to crete record");
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBLocalizerRPC.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBLocalizerRPC.java
new file mode 100644
index 0000000..8b8ff98
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBLocalizerRPC.java
@@ -0,0 +1,105 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb;
+
+import java.net.InetSocketAddress;
+
+import org.apache.avro.ipc.Server;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
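+/**
+ * Exercises the LocalizationProtocol over a real YarnRPC server/proxy pair.
+ */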
+public class TestPBLocalizerRPC {
+
+ static final RecordFactory recordFactory = createPBRecordFactory();
+
+ static RecordFactory createPBRecordFactory() {
+ Configuration conf = new Configuration();
+ conf.set(RecordFactoryProvider.RPC_SERIALIZER_KEY, "protocolbuffers");
+ return RecordFactoryProvider.getRecordFactory(conf);
+ }
+
+ static class LocalizerService implements LocalizationProtocol {
+ private final InetSocketAddress locAddr;
+ private Server server;
+ LocalizerService(InetSocketAddress locAddr) {
+ this.locAddr = locAddr;
+ }
+
+ public void start() {
+ Configuration conf = new Configuration();
+ YarnRPC rpc = YarnRPC.create(conf);
+ server = rpc.getServer(
+ LocalizationProtocol.class, this, locAddr, conf, null, 1);
+ server.start();
+ }
+
+ public void stop() {
+ if (server != null) {
+ server.close();
+ }
+ }
+
+ @Override
+ public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status) {
+ return dieHBResponse();
+ }
+ }
+
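+  // Canned response the test server returns for every heartbeat; the client
+  // side asserts equality against an identical record.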
+ static LocalizerHeartbeatResponse dieHBResponse() {
+ LocalizerHeartbeatResponse response =
+ recordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
+ response.setLocalizerAction(LocalizerAction.DIE);
+ return response;
+ }
+
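+  // Start a localizer server on a fixed test port, heartbeat once over a
+  // real RPC proxy, and expect the canned DIE response back.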
+ @Test
+ public void testLocalizerRPC() throws Exception {
+ InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 4344);
+ LocalizerService server = new LocalizerService(locAddr);
+ try {
+ server.start();
+ Configuration conf = new Configuration();
+ YarnRPC rpc = YarnRPC.create(conf);
+ LocalizationProtocol client = (LocalizationProtocol)
+ rpc.getProxy(LocalizationProtocol.class, locAddr, conf);
+ LocalizerStatus status =
+ recordFactory.newRecordInstance(LocalizerStatus.class);
+ status.setLocalizerId("localizer0");
+ LocalizerHeartbeatResponse response = client.heartbeat(status);
+ assertEquals(dieHBResponse(), response);
+ } finally {
+ server.stop();
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
new file mode 100644
index 0000000..c546e7d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
@@ -0,0 +1,164 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestPBRecordImpl {
+
+ static final RecordFactory recordFactory = createPBRecordFactory();
+
+ static RecordFactory createPBRecordFactory() {
+ Configuration conf = new Configuration();
+ conf.set(RecordFactoryProvider.RPC_SERIALIZER_KEY, "protocolbuffers");
+ return RecordFactoryProvider.getRecordFactory(conf);
+ }
+
+ static LocalResource createResource() {
+ LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
+ assertTrue(ret instanceof LocalResourcePBImpl);
+ ret.setResource(
+ ConverterUtils.getYarnUrlFromPath(
+ new Path("hdfs://y.ak:8020/foo/bar")));
+ ret.setSize(4344L);
+ ret.setTimestamp(3141592653589793L);
+ ret.setVisibility(LocalResourceVisibility.PUBLIC);
+ return ret;
+ }
+
+ static LocalResourceStatus createLocalResourceStatus() {
+ LocalResourceStatus ret =
+ recordFactory.newRecordInstance(LocalResourceStatus.class);
+ assertTrue(ret instanceof LocalResourceStatusPBImpl);
+ ret.setResource(createResource());
+ ret.setLocalPath(
+ ConverterUtils.getYarnUrlFromPath(
+ new Path("file:///local/foo/bar")));
+ ret.setStatus(ResourceStatusType.FETCH_SUCCESS);
+ ret.setLocalSize(4443L);
+ Exception e = new Exception("Dingos.");
+ e.setStackTrace(new StackTraceElement[] {
+ new StackTraceElement("foo", "bar", "baz", 10),
+ new StackTraceElement("sbb", "one", "onm", 10) });
+ ret.setException(RPCUtil.getRemoteException(e));
+ return ret;
+ }
+
+ static LocalizerStatus createLocalizerStatus() {
+ LocalizerStatus ret =
+ recordFactory.newRecordInstance(LocalizerStatus.class);
+ assertTrue(ret instanceof LocalizerStatusPBImpl);
+ ret.setLocalizerId("localizer0");
+ ret.addResourceStatus(createLocalResourceStatus());
+ return ret;
+ }
+
+ static LocalizerHeartbeatResponse createLocalizerHeartbeatResponse() {
+ LocalizerHeartbeatResponse ret =
+ recordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
+ assertTrue(ret instanceof LocalizerHeartbeatResponsePBImpl);
+ ret.setLocalizerAction(LocalizerAction.LIVE);
+ ret.addResource(createResource());
+ return ret;
+ }
+
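+  // Each SerDe test round-trips a record through its protobuf form:
+  // writeDelimitedTo() to a buffer, parseDelimitedFrom() back, then an
+  // equality check between the original and the reconstructed record.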
+ @Test
+ public void testLocalResourceStatusSerDe() throws Exception {
+ LocalResourceStatus rsrcS = createLocalResourceStatus();
+ assertTrue(rsrcS instanceof LocalResourceStatusPBImpl);
+ LocalResourceStatusPBImpl rsrcPb = (LocalResourceStatusPBImpl) rsrcS;
+ DataOutputBuffer out = new DataOutputBuffer();
+ rsrcPb.getProto().writeDelimitedTo(out);
+ DataInputBuffer in = new DataInputBuffer();
+ in.reset(out.getData(), 0, out.getLength());
+ LocalResourceStatusProto rsrcPbD =
+ LocalResourceStatusProto.parseDelimitedFrom(in);
+ assertNotNull(rsrcPbD);
+ LocalResourceStatus rsrcD =
+ new LocalResourceStatusPBImpl(rsrcPbD);
+
+ assertEquals(rsrcS, rsrcD);
+ assertEquals(createResource(), rsrcS.getResource());
+ assertEquals(createResource(), rsrcD.getResource());
+ }
+
+ @Test
+ public void testLocalizerStatusSerDe() throws Exception {
+ LocalizerStatus rsrcS = createLocalizerStatus();
+ assertTrue(rsrcS instanceof LocalizerStatusPBImpl);
+ LocalizerStatusPBImpl rsrcPb = (LocalizerStatusPBImpl) rsrcS;
+ DataOutputBuffer out = new DataOutputBuffer();
+ rsrcPb.getProto().writeDelimitedTo(out);
+ DataInputBuffer in = new DataInputBuffer();
+ in.reset(out.getData(), 0, out.getLength());
+ LocalizerStatusProto rsrcPbD =
+ LocalizerStatusProto.parseDelimitedFrom(in);
+ assertNotNull(rsrcPbD);
+ LocalizerStatus rsrcD =
+ new LocalizerStatusPBImpl(rsrcPbD);
+
+ assertEquals(rsrcS, rsrcD);
+ assertEquals("localizer0", rsrcS.getLocalizerId());
+ assertEquals("localizer0", rsrcD.getLocalizerId());
+ assertEquals(createLocalResourceStatus(), rsrcS.getResourceStatus(0));
+ assertEquals(createLocalResourceStatus(), rsrcD.getResourceStatus(0));
+ }
+
+ @Test
+ public void testLocalizerHeartbeatResponseSerDe() throws Exception {
+ LocalizerHeartbeatResponse rsrcS = createLocalizerHeartbeatResponse();
+ assertTrue(rsrcS instanceof LocalizerHeartbeatResponsePBImpl);
+ LocalizerHeartbeatResponsePBImpl rsrcPb =
+ (LocalizerHeartbeatResponsePBImpl) rsrcS;
+ DataOutputBuffer out = new DataOutputBuffer();
+ rsrcPb.getProto().writeDelimitedTo(out);
+ DataInputBuffer in = new DataInputBuffer();
+ in.reset(out.getData(), 0, out.getLength());
+ LocalizerHeartbeatResponseProto rsrcPbD =
+ LocalizerHeartbeatResponseProto.parseDelimitedFrom(in);
+ assertNotNull(rsrcPbD);
+ LocalizerHeartbeatResponse rsrcD =
+ new LocalizerHeartbeatResponsePBImpl(rsrcPbD);
+
+ assertEquals(rsrcS, rsrcD);
+ assertEquals(createResource(), rsrcS.getLocalResource(0));
+ assertEquals(createResource(), rsrcD.getLocalResource(0));
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
new file mode 100644
index 0000000..6ddb729
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -0,0 +1,211 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.LocalRMInterface;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
+import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
+import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.service.Service.STATE;
+import org.junit.After;
+import org.junit.Before;
+
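+/**
+ * Common fixture for ContainerManager tests: creates local, log, and tmp
+ * dirs under target/, wires a ContainerManagerImpl to a stub RM and a
+ * non-deleting DeletionService, and provides polling helpers for container
+ * and application state transitions.
+ */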
+public abstract class BaseContainerManagerTest {
+
+ protected static RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ protected static FileContext localFS;
+ protected static File localDir;
+ protected static File localLogDir;
+ protected static File remoteLogDir;
+ protected static File tmpDir;
+
+ protected final NodeManagerMetrics metrics = NodeManagerMetrics.create();
+
+ public BaseContainerManagerTest() throws UnsupportedFileSystemException {
+ localFS = FileContext.getLocalFSFileContext();
+ localDir =
+ new File("target", this.getClass().getName() + "-localDir")
+ .getAbsoluteFile();
+ localLogDir =
+ new File("target", this.getClass().getName() + "-localLogDir")
+ .getAbsoluteFile();
+ remoteLogDir =
+ new File("target", this.getClass().getName() + "-remoteLogDir")
+ .getAbsoluteFile();
+ tmpDir = new File("target", this.getClass().getName() + "-tmpDir");
+ }
+
+ protected static Log LOG = LogFactory
+ .getLog(BaseContainerManagerTest.class);
+
+ protected Configuration conf = new YarnConfiguration();
+ protected Context context = new NMContext();
+ protected ContainerExecutor exec;
+ protected DeletionService delSrvc;
+ protected String user = "nobody";
+
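+  // Status updater bound to an in-process RM stub; the heartbeat thread is
+  // deliberately never started so tests stay self-contained.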
+ protected NodeStatusUpdater nodeStatusUpdater = new NodeStatusUpdaterImpl(
+ context, new AsyncDispatcher(), null, metrics) {
+ @Override
+ protected ResourceTracker getRMClient() {
+ return new LocalRMInterface();
+    }
+
+ @Override
+ protected void startStatusUpdater() {
+ return; // Don't start any updating thread.
+ }
+ };
+
+ protected ContainerManagerImpl containerManager = null;
+
+ protected ContainerExecutor createContainerExecutor() {
+ DefaultContainerExecutor exec = new DefaultContainerExecutor();
+ exec.setConf(conf);
+ return exec;
+ }
+
+ @Before
+ public void setup() throws IOException {
+ localFS.delete(new Path(localDir.getAbsolutePath()), true);
+ localFS.delete(new Path(tmpDir.getAbsolutePath()), true);
+ localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
+ localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
+ localDir.mkdir();
+ tmpDir.mkdir();
+ localLogDir.mkdir();
+ remoteLogDir.mkdir();
+ LOG.info("Created localDir in " + localDir.getAbsolutePath());
+ LOG.info("Created tmpDir in " + tmpDir.getAbsolutePath());
+
+ String bindAddress = "0.0.0.0:5555";
+ conf.set(NMConfig.NM_BIND_ADDRESS, bindAddress);
+ conf.set(NMConfig.NM_LOCAL_DIR, localDir.getAbsolutePath());
+ conf.set(NMConfig.NM_LOG_DIR, localLogDir.getAbsolutePath());
+ conf.set(NMConfig.REMOTE_USER_LOG_DIR, remoteLogDir.getAbsolutePath());
+
+ // Default delSrvc
+ delSrvc = new DeletionService(exec) {
+ @Override
+ public void delete(String user, Path subDir, Path[] baseDirs) {
+ // Don't do any deletions.
+ LOG.info("Psuedo delete: user - " + user + ", subDir - " + subDir
+ + ", baseDirs - " + baseDirs);
+ };
+ };
+ delSrvc.init(conf);
+
+ exec = createContainerExecutor();
+ containerManager =
+ new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
+ metrics);
+ containerManager.init(conf);
+ }
+
+ @After
+ public void tearDown() throws IOException, InterruptedException {
+ if (containerManager != null
+ && containerManager.getServiceState() == STATE.STARTED) {
+ containerManager.stop();
+ }
+ createContainerExecutor().deleteAsUser(user,
+ new Path(localDir.getAbsolutePath()), new Path[] {});
+ }
+
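+  // Poll the container status once a second until it reaches finalState or
+  // the timeout (in seconds) expires, then assert on the final state.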
+ public static void waitForContainerState(ContainerManager containerManager,
+ ContainerId containerID, ContainerState finalState)
+ throws InterruptedException, YarnRemoteException {
+ waitForContainerState(containerManager, containerID, finalState, 20);
+ }
+
+ public static void waitForContainerState(ContainerManager containerManager,
+ ContainerId containerID, ContainerState finalState, int timeOutMax)
+ throws InterruptedException, YarnRemoteException {
+ GetContainerStatusRequest request =
+ recordFactory.newRecordInstance(GetContainerStatusRequest.class);
+ request.setContainerId(containerID);
+ ContainerStatus containerStatus =
+ containerManager.getContainerStatus(request).getStatus();
+ int timeoutSecs = 0;
+ while (!containerStatus.getState().equals(finalState)
+ && timeoutSecs++ < timeOutMax) {
+ Thread.sleep(1000);
+ LOG.info("Waiting for container to get into state " + finalState
+ + ". Current state is " + containerStatus.getState());
+ containerStatus = containerManager.getContainerStatus(request).getStatus();
+ }
+ LOG.info("Container state is " + containerStatus.getState());
+ Assert.assertEquals("ContainerState is not correct (timedout)",
+ finalState, containerStatus.getState());
+ }
+
+ static void waitForApplicationState(ContainerManagerImpl containerManager,
+ ApplicationId appID, ApplicationState finalState)
+ throws InterruptedException {
+ // Wait for app-finish
+ Application app =
+ containerManager.context.getApplications().get(appID);
+ int timeout = 0;
+ while (!(app.getApplicationState().equals(finalState))
+ && timeout++ < 15) {
+ LOG.info("Waiting for app to reach " + finalState
+ + ".. Current state is "
+ + app.getApplicationState());
+ Thread.sleep(1000);
+ }
+
+ Assert.assertTrue("App is not in " + finalState + " yet!! Timedout!!",
+ app.getApplicationState().equals(finalState));
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
new file mode 100644
index 0000000..e30374b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
@@ -0,0 +1,161 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.service.Service;
+
+import static org.apache.hadoop.yarn.service.Service.STATE.*;
+
+public class TestAuxServices {
+
+ static class LightService extends AbstractService
+ implements AuxServices.AuxiliaryService {
+ private final char idef;
+ private final int expected_appId;
+ private int remaining_init;
+ private int remaining_stop;
+ LightService(String name, char idef, int expected_appId) {
+ super(name);
+ this.idef = idef;
+ this.expected_appId = expected_appId;
+ }
+ @Override
+ public void init(Configuration conf) {
+ remaining_init = conf.getInt(idef + ".expected.init", 0);
+ remaining_stop = conf.getInt(idef + ".expected.stop", 0);
+ super.init(conf);
+ }
+ @Override
+ public void stop() {
+ assertEquals(0, remaining_init);
+ assertEquals(0, remaining_stop);
+ super.stop();
+ }
+ @Override
+ public void initApp(String user, ApplicationId appId, ByteBuffer data) {
+ assertEquals(idef, data.getChar());
+ assertEquals(expected_appId, data.getInt());
+ assertEquals(expected_appId, appId.getId());
+ }
+ @Override
+ public void stopApp(ApplicationId appId) {
+ assertEquals(expected_appId, appId.getId());
+ }
+ }
+
+ static class ServiceA extends LightService {
+ public ServiceA() { super("A", 'A', 65); }
+ }
+
+ static class ServiceB extends LightService {
+ public ServiceB() { super("B", 'B', 66); }
+ }
+
+ @Test
+ public void testAuxEventDispatch() {
+ Configuration conf = new Configuration();
+ conf.setStrings(AuxServices.AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
+ conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Asrv"),
+ ServiceA.class, Service.class);
+ conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Bsrv"),
+ ServiceB.class, Service.class);
+ conf.setInt("A.expected.init", 1);
+ conf.setInt("B.expected.stop", 1);
+ final AuxServices aux = new AuxServices();
+ aux.init(conf);
+ aux.start();
+
+ ApplicationId appId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
+ appId.setId(65);
+ ByteBuffer buf = ByteBuffer.allocate(6);
+ buf.putChar('A');
+ buf.putInt(65);
+ buf.flip();
+ AuxServicesEvent event = new AuxServicesEvent(
+ AuxServicesEventType.APPLICATION_INIT, "user0", appId, "Asrv", buf);
+ aux.handle(event);
+ appId.setId(66);
+ event = new AuxServicesEvent(
+        AuxServicesEventType.APPLICATION_STOP, "user0", appId, "Bsrv", null);
+    aux.handle(event);
+  }
+
+ @Test
+ public void testAuxServices() {
+ Configuration conf = new Configuration();
+ conf.setStrings(AuxServices.AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
+ conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Asrv"),
+ ServiceA.class, Service.class);
+ conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Bsrv"),
+ ServiceB.class, Service.class);
+ final AuxServices aux = new AuxServices();
+ aux.init(conf);
+
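+    // Encode each service type as a prime factor (2 for ServiceA, 3 for
+    // ServiceB): latch == 6 iff exactly one instance of each was created.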
+ int latch = 1;
+ for (Service s : aux.getServices()) {
+ assertEquals(INITED, s.getServiceState());
+ if (s instanceof ServiceA) { latch *= 2; }
+ else if (s instanceof ServiceB) { latch *= 3; }
+ else fail("Unexpected service type " + s.getClass());
+ }
+ assertEquals("Invalid mix of services", 6, latch);
+ aux.start();
+ for (Service s : aux.getServices()) {
+ assertEquals(STARTED, s.getServiceState());
+ }
+
+ aux.stop();
+ for (Service s : aux.getServices()) {
+ assertEquals(STOPPED, s.getServiceState());
+ }
+ }
+
+ @Test
+ public void testAuxUnexpectedStop() {
+ Configuration conf = new Configuration();
+ conf.setStrings(AuxServices.AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
+ conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Asrv"),
+ ServiceA.class, Service.class);
+ conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Bsrv"),
+ ServiceB.class, Service.class);
+ final AuxServices aux = new AuxServices();
+ aux.init(conf);
+ aux.start();
+
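+    // Stopping any single auxiliary service must fail-fast the composite:
+    // AuxServices transitions to STOPPED and drops all registered services.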
+ Service s = aux.getServices().iterator().next();
+ s.stop();
+ assertEquals("Auxiliary service stopped, but AuxService unaffected.",
+ STOPPED, aux.getServiceState());
+ assertTrue(aux.getServices().isEmpty());
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
new file mode 100644
index 0000000..ddf8d7b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -0,0 +1,374 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Arrays;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.junit.Test;
+
+public class TestContainerManager extends BaseContainerManagerTest {
+
+ public TestContainerManager() throws UnsupportedFileSystemException {
+ super();
+ }
+
+ static {
+ LOG = LogFactory.getLog(TestContainerManager.class);
+ }
+
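+  // Querying a container that was never started must surface a
+  // YarnRemoteException instead of a fabricated status.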
+ @Test
+ public void testContainerManagerInitialization() throws IOException {
+
+ containerManager.start();
+
+ // Just do a query for a non-existing container.
+ boolean throwsException = false;
+ try {
+ GetContainerStatusRequest request = recordFactory.newRecordInstance(GetContainerStatusRequest.class);
+ request.setContainerId(recordFactory.newRecordInstance(ContainerId.class));
+ containerManager.getContainerStatus(request);
+ } catch (YarnRemoteException e) {
+ throwsException = true;
+ }
+ Assert.assertTrue(throwsException);
+ }
+
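+  // Launch a container that only localizes a single file, then verify the
+  // on-disk layout of the usercache and nm-private directories.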
+ @Test
+ public void testContainerSetup() throws IOException, InterruptedException {
+
+ containerManager.start();
+
+ // ////// Create the resources for the container
+ File dir = new File(tmpDir, "dir");
+ dir.mkdirs();
+ File file = new File(dir, "file");
+ PrintWriter fileWriter = new PrintWriter(file);
+ fileWriter.write("Hello World!");
+ fileWriter.close();
+
+ ContainerLaunchContext container = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ // ////// Construct the Container-id
+ ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
+ ContainerId cId = recordFactory.newRecordInstance(ContainerId.class);
+ cId.setAppId(appId);
+ container.setContainerId(cId);
+
+ container.setUser(user);
+
+ // ////// Construct the container-spec.
+ ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ URL resource_alpha =
+ ConverterUtils.getYarnUrlFromPath(localFS
+ .makeQualified(new Path(file.getAbsolutePath())));
+ LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
+ rsrc_alpha.setResource(resource_alpha);
+ rsrc_alpha.setSize(-1);
+ rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
+ rsrc_alpha.setType(LocalResourceType.FILE);
+ rsrc_alpha.setTimestamp(file.lastModified());
+ String destinationFile = "dest_file";
+ containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha);
+ containerLaunchContext.setUser(container.getUser());
+ containerLaunchContext.setContainerId(container.getContainerId());
+ containerLaunchContext.setResource(recordFactory
+ .newRecordInstance(Resource.class));
+
+ StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
+ startRequest.setContainerLaunchContext(containerLaunchContext);
+
+ containerManager.startContainer(startRequest);
+
+ BaseContainerManagerTest.waitForContainerState(containerManager, cId,
+ ContainerState.COMPLETE);
+
+ // Now ascertain that the resources are localised correctly.
+ // TODO: Don't we need clusterStamp in localDir?
+ String appIDStr = ConverterUtils.toString(appId);
+ String containerIDStr = ConverterUtils.toString(cId);
+ File userCacheDir = new File(localDir, ContainerLocalizer.USERCACHE);
+ File userDir = new File(userCacheDir, user);
+ File appCache = new File(userDir, ContainerLocalizer.APPCACHE);
+ File appDir = new File(appCache, appIDStr);
+ File containerDir = new File(appDir, containerIDStr);
+ File targetFile = new File(containerDir, destinationFile);
+ File sysDir =
+ new File(localDir,
+ ResourceLocalizationService.NM_PRIVATE_DIR);
+ File appSysDir = new File(sysDir, appIDStr);
+ File containerSysDir = new File(appSysDir, containerIDStr);
+
+ for (File f : new File[] { localDir, sysDir, userCacheDir, appDir,
+ appSysDir,
+ containerDir, containerSysDir }) {
+ Assert.assertTrue(f.getAbsolutePath() + " doesn't exist!!", f.exists());
+ Assert.assertTrue(f.getAbsolutePath() + " is not a directory!!",
+ f.isDirectory());
+ }
+ Assert.assertTrue(targetFile.getAbsolutePath() + " doesn't exist!!",
+ targetFile.exists());
+
+ // Now verify the contents of the file
+ BufferedReader reader = new BufferedReader(new FileReader(targetFile));
+ Assert.assertEquals("Hello World!", reader.readLine());
+    Assert.assertEquals(null, reader.readLine());
+    reader.close();
+ }
+
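+  // Launch a script that records its pid and sleeps, confirm the process is
+  // alive via NULL signals, stop the container, then verify the KILLED exit
+  // code and that the process is gone.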
+ @Test
+ public void testContainerLaunchAndStop() throws IOException,
+ InterruptedException {
+ containerManager.start();
+
+ File scriptFile = new File(tmpDir, "scriptFile.sh");
+ PrintWriter fileWriter = new PrintWriter(scriptFile);
+ File processStartFile =
+ new File(tmpDir, "start_file.txt").getAbsoluteFile();
+ fileWriter.write("\numask 0"); // So that start file is readable by the test.
+ fileWriter.write("\necho Hello World! > " + processStartFile);
+ fileWriter.write("\necho $$ >> " + processStartFile);
+ fileWriter.write("\nexec sleep 100");
+ fileWriter.close();
+
+ ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ // ////// Construct the Container-id
+ ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
+ ContainerId cId = recordFactory.newRecordInstance(ContainerId.class);
+ cId.setAppId(appId);
+ containerLaunchContext.setContainerId(cId);
+
+ containerLaunchContext.setUser(user);
+
+ URL resource_alpha =
+ ConverterUtils.getYarnUrlFromPath(localFS
+ .makeQualified(new Path(scriptFile.getAbsolutePath())));
+ LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
+ rsrc_alpha.setResource(resource_alpha);
+ rsrc_alpha.setSize(-1);
+ rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
+ rsrc_alpha.setType(LocalResourceType.FILE);
+ rsrc_alpha.setTimestamp(scriptFile.lastModified());
+ String destinationFile = "dest_file";
+ containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha);
+ containerLaunchContext.addCommand("/bin/bash");
+ containerLaunchContext.addCommand(scriptFile.getAbsolutePath());
+ containerLaunchContext.setResource(recordFactory
+ .newRecordInstance(Resource.class));
+ containerLaunchContext.getResource().setMemory(100 * 1024 * 1024);
+ StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
+ startRequest.setContainerLaunchContext(containerLaunchContext);
+ containerManager.startContainer(startRequest);
+
+ int timeoutSecs = 0;
+ while (!processStartFile.exists() && timeoutSecs++ < 20) {
+ Thread.sleep(1000);
+ LOG.info("Waiting for process start-file to be created");
+ }
+ Assert.assertTrue("ProcessStartFile doesn't exist!",
+ processStartFile.exists());
+
+ // Now verify the contents of the file
+ BufferedReader reader =
+ new BufferedReader(new FileReader(processStartFile));
+ Assert.assertEquals("Hello World!", reader.readLine());
+ // Get the pid of the process
+ String pid = reader.readLine().trim();
+ // No more lines
+    Assert.assertEquals(null, reader.readLine());
+    reader.close();
+
+ // Now test the stop functionality.
+
+ // Assert that the process is alive
+ Assert.assertTrue("Process is not alive!",
+ exec.signalContainer(user,
+ pid, Signal.NULL));
+ // Once more
+ Assert.assertTrue("Process is not alive!",
+ exec.signalContainer(user,
+ pid, Signal.NULL));
+
+ StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
+ stopRequest.setContainerId(cId);
+ containerManager.stopContainer(stopRequest);
+
+ BaseContainerManagerTest.waitForContainerState(containerManager, cId,
+ ContainerState.COMPLETE);
+
+ GetContainerStatusRequest gcsRequest = recordFactory.newRecordInstance(GetContainerStatusRequest.class);
+ gcsRequest.setContainerId(cId);
+ ContainerStatus containerStatus = containerManager.getContainerStatus(gcsRequest).getStatus();
+ Assert.assertEquals(String.valueOf(ExitCode.KILLED.getExitCode()),
+ containerStatus.getExitStatus());
+
+ // Assert that the process is not alive anymore
+ Assert.assertFalse("Process is still alive!",
+ exec.signalContainer(user,
+ pid, Signal.NULL));
+ }
+
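+  // With a real DeletionService: container dirs must disappear once the
+  // container completes, app dirs only after the app-finish event.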
+ @Test
+ public void testLocalFilesCleanup() throws InterruptedException,
+ IOException {
+ // Real del service
+ delSrvc = new DeletionService(exec);
+ delSrvc.init(conf);
+ containerManager = new ContainerManagerImpl(context, exec, delSrvc,
+ nodeStatusUpdater, metrics);
+ containerManager.init(conf);
+ containerManager.start();
+
+ // ////// Create the resources for the container
+ File dir = new File(tmpDir, "dir");
+ dir.mkdirs();
+ File file = new File(dir, "file");
+ PrintWriter fileWriter = new PrintWriter(file);
+ fileWriter.write("Hello World!");
+ fileWriter.close();
+
+ ContainerLaunchContext container = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ // ////// Construct the Container-id
+ ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
+ ContainerId cId = recordFactory.newRecordInstance(ContainerId.class);
+ cId.setAppId(appId);
+ container.setContainerId(cId);
+
+ container.setUser(user);
+
+ // ////// Construct the container-spec.
+ ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ URL resource_alpha =
+ ConverterUtils.getYarnUrlFromPath(FileContext.getLocalFSFileContext()
+ .makeQualified(new Path(file.getAbsolutePath())));
+ LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
+ rsrc_alpha.setResource(resource_alpha);
+ rsrc_alpha.setSize(-1);
+ rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
+ rsrc_alpha.setType(LocalResourceType.FILE);
+ rsrc_alpha.setTimestamp(file.lastModified());
+ String destinationFile = "dest_file";
+ containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha);
+ containerLaunchContext.setUser(container.getUser());
+ containerLaunchContext.setContainerId(container.getContainerId());
+ containerLaunchContext.setResource(recordFactory
+ .newRecordInstance(Resource.class));
+
+ StartContainerRequest request = recordFactory.newRecordInstance(StartContainerRequest.class);
+ request.setContainerLaunchContext(containerLaunchContext);
+ containerManager.startContainer(request);
+
+ BaseContainerManagerTest.waitForContainerState(containerManager, cId,
+ ContainerState.COMPLETE);
+
+ BaseContainerManagerTest.waitForApplicationState(containerManager, cId.getAppId(),
+ ApplicationState.RUNNING);
+
+ // Now ascertain that the resources are localised correctly.
+ String appIDStr = ConverterUtils.toString(appId);
+ String containerIDStr = ConverterUtils.toString(cId);
+ File userCacheDir = new File(localDir, ContainerLocalizer.USERCACHE);
+ File userDir = new File(userCacheDir, user);
+ File appCache = new File(userDir, ContainerLocalizer.APPCACHE);
+ File appDir = new File(appCache, appIDStr);
+ File containerDir = new File(appDir, containerIDStr);
+ File targetFile = new File(containerDir, destinationFile);
+ File sysDir =
+ new File(localDir,
+ ResourceLocalizationService.NM_PRIVATE_DIR);
+ File appSysDir = new File(sysDir, appIDStr);
+ File containerSysDir = new File(appSysDir, containerIDStr);
+ // AppDir should still exist
+ Assert.assertTrue("AppDir " + appDir.getAbsolutePath()
+ + " doesn't exist!!", appDir.exists());
+ Assert.assertTrue("AppSysDir " + appSysDir.getAbsolutePath()
+ + " doesn't exist!!", appSysDir.exists());
+ for (File f : new File[] { containerDir, containerSysDir }) {
+ Assert.assertFalse(f.getAbsolutePath() + " exists!!", f.exists());
+ }
+ Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!",
+ targetFile.exists());
+
+ // Simulate RM sending an AppFinish event.
+ containerManager.handle(new CMgrCompletedAppsEvent(Arrays
+ .asList(new ApplicationId[] { appId })));
+
+ BaseContainerManagerTest.waitForApplicationState(containerManager, cId.getAppId(),
+ ApplicationState.FINISHED);
+
+ // Now ascertain that the resources are localised correctly.
+ for (File f : new File[] { appDir, containerDir, appSysDir,
+ containerSysDir }) {
+ // Wait for deletion. Deletion can happen long after AppFinish because of
+ // the async DeletionService
+ int timeout = 0;
+ while (f.exists() && timeout++ < 15) {
+ Thread.sleep(1000);
+ }
+ Assert.assertFalse(f.getAbsolutePath() + " exists!!", f.exists());
+ }
+ // Wait for deletion
+ int timeout = 0;
+ while (targetFile.exists() && timeout++ < 15) {
+ Thread.sleep(1000);
+ }
+ Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!",
+ targetFile.exists());
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
new file mode 100644
index 0000000..2dd60b6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
@@ -0,0 +1,341 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
+
+import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
+import java.util.AbstractMap.SimpleEntry;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.mockito.ArgumentMatcher;
+import static org.mockito.Mockito.*;
+
+public class TestContainer {
+
+ final NodeManagerMetrics metrics = NodeManagerMetrics.create();
+
+ /**
+ * Verify correct container request events sent to localizer.
+ */
+ @Test
+ @SuppressWarnings("unchecked") // mocked generic
+ public void testLocalizationRequest() throws Exception {
+ DrainDispatcher dispatcher = new DrainDispatcher();
+ dispatcher.init(null);
+ try {
+ dispatcher.start();
+ EventHandler<LocalizationEvent> localizerBus = mock(EventHandler.class);
+ dispatcher.register(LocalizationEventType.class, localizerBus);
+ // null serviceData; no registered AuxServicesEventType handler
+
+ ContainerLaunchContext ctxt = mock(ContainerLaunchContext.class);
+ ContainerId cId = getMockContainerId(7, 314159265358979L, 4344);
+ when(ctxt.getUser()).thenReturn("yak");
+ when(ctxt.getContainerId()).thenReturn(cId);
+
+ Random r = new Random();
+ long seed = r.nextLong();
+ r.setSeed(seed);
+ System.out.println("testLocalizationRequest seed: " + seed);
+ final Map<String,LocalResource> localResources = createLocalResources(r);
+ when(ctxt.getAllLocalResources()).thenReturn(localResources);
+
+ final Container c = newContainer(dispatcher, ctxt);
+ assertEquals(ContainerState.NEW, c.getContainerState());
+
+ // Verify request for public/private resources to localizer
+ c.handle(new ContainerEvent(cId, ContainerEventType.INIT_CONTAINER));
+ dispatcher.await();
+ ContainerReqMatcher matchesPublicReq =
+ new ContainerReqMatcher(localResources,
+ EnumSet.of(LocalResourceVisibility.PUBLIC));
+ ContainerReqMatcher matchesPrivateReq =
+ new ContainerReqMatcher(localResources,
+ EnumSet.of(LocalResourceVisibility.PRIVATE));
+ ContainerReqMatcher matchesAppReq =
+ new ContainerReqMatcher(localResources,
+ EnumSet.of(LocalResourceVisibility.APPLICATION));
+ verify(localizerBus).handle(argThat(matchesPublicReq));
+ verify(localizerBus).handle(argThat(matchesPrivateReq));
+ verify(localizerBus).handle(argThat(matchesAppReq));
+ assertEquals(ContainerState.LOCALIZING, c.getContainerState());
+ } finally {
+ dispatcher.stop();
+ }
+ }
+
+ /**
+ * Verify container launch when all resources already cached.
+ */
+ @Test
+ @SuppressWarnings("unchecked") // mocked generic
+ public void testLocalizationLaunch() throws Exception {
+ DrainDispatcher dispatcher = new DrainDispatcher();
+ dispatcher.init(null);
+ try {
+ dispatcher.start();
+ EventHandler<LocalizationEvent> localizerBus = mock(EventHandler.class);
+ dispatcher.register(LocalizationEventType.class, localizerBus);
+ EventHandler<ContainersLauncherEvent> launcherBus =
+ mock(EventHandler.class);
+ dispatcher.register(ContainersLauncherEventType.class, launcherBus);
+ // null serviceData; no registered AuxServicesEventType handler
+
+ ContainerLaunchContext ctxt = mock(ContainerLaunchContext.class);
+ ContainerId cId = getMockContainerId(8, 314159265358979L, 4344);
+ when(ctxt.getUser()).thenReturn("yak");
+ when(ctxt.getContainerId()).thenReturn(cId);
+
+ Random r = new Random();
+ long seed = r.nextLong();
+ r.setSeed(seed);
+ System.out.println("testLocalizationLaunch seed: " + seed);
+ final Map<String,LocalResource> localResources = createLocalResources(r);
+ when(ctxt.getAllLocalResources()).thenReturn(localResources);
+ final Container c = newContainer(dispatcher, ctxt);
+ assertEquals(ContainerState.NEW, c.getContainerState());
+
+ c.handle(new ContainerEvent(cId, ContainerEventType.INIT_CONTAINER));
+ dispatcher.await();
+
+ // Container prepared for localization events
+ Path cache = new Path("file:///cache");
+ Map<Path,String> localPaths = new HashMap<Path,String>();
+ for (Entry<String,LocalResource> rsrc : localResources.entrySet()) {
+ assertEquals(ContainerState.LOCALIZING, c.getContainerState());
+ LocalResourceRequest req = new LocalResourceRequest(rsrc.getValue());
+ Path p = new Path(cache, rsrc.getKey());
+ localPaths.put(p, rsrc.getKey());
+ // rsrc copied to p
+ c.handle(new ContainerResourceLocalizedEvent(c.getContainerID(), req, p));
+ }
+ dispatcher.await();
+
+ // all resources should be localized
+ assertEquals(ContainerState.LOCALIZED, c.getContainerState());
+ for (Entry<Path,String> loc : c.getLocalizedResources().entrySet()) {
+ assertEquals(localPaths.remove(loc.getKey()), loc.getValue());
+ }
+ assertTrue(localPaths.isEmpty());
+
+ // verify container launch
+ ArgumentMatcher<ContainersLauncherEvent> matchesContainerLaunch =
+ new ArgumentMatcher<ContainersLauncherEvent>() {
+ @Override
+ public boolean matches(Object o) {
+ ContainersLauncherEvent launchEvent = (ContainersLauncherEvent) o;
+ return c == launchEvent.getContainer();
+ }
+ };
+ verify(launcherBus).handle(argThat(matchesContainerLaunch));
+ } finally {
+ dispatcher.stop();
+ }
+ }
+
+ /**
+ * Verify serviceData correctly sent.
+ */
+ @Test
+ @SuppressWarnings("unchecked") // mocked generic
+ public void testServiceData() throws Exception {
+ DrainDispatcher dispatcher = new DrainDispatcher();
+ dispatcher.init(null);
+ dispatcher.start();
+ try {
+ EventHandler<LocalizationEvent> localizerBus = mock(EventHandler.class);
+ dispatcher.register(LocalizationEventType.class, localizerBus);
+ EventHandler<AuxServicesEvent> auxBus = mock(EventHandler.class);
+ dispatcher.register(AuxServicesEventType.class, auxBus);
+ EventHandler<ContainersLauncherEvent> launchBus = mock(EventHandler.class);
+ dispatcher.register(ContainersLauncherEventType.class, launchBus);
+
+ ContainerLaunchContext ctxt = mock(ContainerLaunchContext.class);
+ final ContainerId cId = getMockContainerId(9, 314159265358979L, 4344);
+ when(ctxt.getUser()).thenReturn("yak");
+ when(ctxt.getContainerId()).thenReturn(cId);
+ when(ctxt.getAllLocalResources()).thenReturn(
+ Collections.<String,LocalResource>emptyMap());
+
+ Random r = new Random();
+ long seed = r.nextLong();
+ r.setSeed(seed);
+ System.out.println("testServiceData seed: " + seed);
+ final Map<String,ByteBuffer> serviceData = createServiceData(r);
+ when(ctxt.getAllServiceData()).thenReturn(serviceData);
+
+ final Container c = newContainer(dispatcher, ctxt);
+ assertEquals(ContainerState.NEW, c.getContainerState());
+
+ // Verify propagation of service data to AuxServices
+ c.handle(new ContainerEvent(cId, ContainerEventType.INIT_CONTAINER));
+ dispatcher.await();
+ for (final Map.Entry<String,ByteBuffer> e : serviceData.entrySet()) {
+ ArgumentMatcher<AuxServicesEvent> matchesServiceReq =
+ new ArgumentMatcher<AuxServicesEvent>() {
+ @Override
+ public boolean matches(Object o) {
+ AuxServicesEvent evt = (AuxServicesEvent) o;
+ return e.getKey().equals(evt.getServiceID())
+ && 0 == e.getValue().compareTo(evt.getServiceData());
+ }
+ };
+ verify(auxBus).handle(argThat(matchesServiceReq));
+ }
+
+ // verify launch on empty resource request
+ ArgumentMatcher<ContainersLauncherEvent> matchesLaunchReq =
+ new ArgumentMatcher<ContainersLauncherEvent>() {
+ @Override
+ public boolean matches(Object o) {
+ ContainersLauncherEvent evt = (ContainersLauncherEvent) o;
+ return evt.getType() == ContainersLauncherEventType.LAUNCH_CONTAINER
+ && cId == evt.getContainer().getContainerID();
+ }
+ };
+ verify(launchBus).handle(argThat(matchesLaunchReq));
+ } finally {
+ dispatcher.stop();
+ }
+ }
+
+ // Accept iff the resource request payload matches.
+ static class ContainerReqMatcher extends ArgumentMatcher<LocalizationEvent> {
+ final HashSet<LocalResourceRequest> resources =
+ new HashSet<LocalResourceRequest>();
+ ContainerReqMatcher(Map<String,LocalResource> allResources,
+ EnumSet<LocalResourceVisibility> vis) throws URISyntaxException {
+ for (Entry<String,LocalResource> e : allResources.entrySet()) {
+ if (vis.contains(e.getValue().getVisibility())) {
+ resources.add(new LocalResourceRequest(e.getValue()));
+ }
+ }
+ }
+ @Override
+ public boolean matches(Object o) {
+ ContainerLocalizationRequestEvent evt = (ContainerLocalizationRequestEvent) o;
+ final HashSet<LocalResourceRequest> expected =
+ new HashSet<LocalResourceRequest>(resources);
+ for (LocalResourceRequest rsrc : evt.getRequestedResources()) {
+ if (!expected.remove(rsrc)) {
+ return false;
+ }
+ }
+ return expected.isEmpty();
+ }
+ }
+
+ static Entry<String,LocalResource> getMockRsrc(Random r,
+ LocalResourceVisibility vis) {
+ LocalResource rsrc = mock(LocalResource.class);
+
+ String name = Long.toHexString(r.nextLong());
+ URL uri = mock(org.apache.hadoop.yarn.api.records.URL.class);
+ when(uri.getScheme()).thenReturn("file");
+ when(uri.getHost()).thenReturn(null);
+ when(uri.getFile()).thenReturn("/local/" + vis + "/" + name);
+
+ when(rsrc.getResource()).thenReturn(uri);
+ when(rsrc.getSize()).thenReturn(r.nextInt(1024) + 1024L);
+ when(rsrc.getTimestamp()).thenReturn(r.nextInt(1024) + 2048L);
+ when(rsrc.getType()).thenReturn(LocalResourceType.FILE);
+ when(rsrc.getVisibility()).thenReturn(vis);
+
+ return new SimpleEntry<String,LocalResource>(name, rsrc);
+ }
+
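+  // Build a randomized mix of PUBLIC, PRIVATE, and APPLICATION resources;
+  // each test logs its random seed so failures can be reproduced.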
+ static Map<String,LocalResource> createLocalResources(Random r) {
+ Map<String,LocalResource> localResources =
+ new HashMap<String,LocalResource>();
+ for (int i = r.nextInt(5) + 5; i >= 0; --i) {
+ Entry<String,LocalResource> rsrc =
+ getMockRsrc(r, LocalResourceVisibility.PUBLIC);
+ localResources.put(rsrc.getKey(), rsrc.getValue());
+ }
+ for (int i = r.nextInt(5) + 5; i >= 0; --i) {
+ Entry<String,LocalResource> rsrc =
+ getMockRsrc(r, LocalResourceVisibility.PRIVATE);
+ localResources.put(rsrc.getKey(), rsrc.getValue());
+ }
+ for (int i = r.nextInt(2) + 2; i >= 0; --i) {
+ Entry<String,LocalResource> rsrc =
+ getMockRsrc(r, LocalResourceVisibility.APPLICATION);
+ localResources.put(rsrc.getKey(), rsrc.getValue());
+ }
+ return localResources;
+ }
+
+ static ContainerId getMockContainerId(int appId, long timestamp, int id) {
+ ApplicationId aId = mock(ApplicationId.class);
+ when(aId.getId()).thenReturn(appId);
+ when(aId.getClusterTimestamp()).thenReturn(timestamp);
+ ContainerId cId = mock(ContainerId.class);
+ when(cId.getId()).thenReturn(id);
+ when(cId.getAppId()).thenReturn(aId);
+ return cId;
+ }
+
+ static Map<String,ByteBuffer> createServiceData(Random r) {
+ Map<String,ByteBuffer> serviceData =
+ new HashMap<String,ByteBuffer>();
+ for (int i = r.nextInt(5) + 5; i >= 0; --i) {
+ String service = Long.toHexString(r.nextLong());
+ byte[] b = new byte[r.nextInt(1024) + 1024];
+ r.nextBytes(b);
+ serviceData.put(service, ByteBuffer.wrap(b));
+ }
+ return serviceData;
+ }
+
+ Container newContainer(Dispatcher disp, ContainerLaunchContext ctx) {
+ return new ContainerImpl(disp, ctx, null, metrics);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FakeFSDataInputStream.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FakeFSDataInputStream.java
new file mode 100644
index 0000000..2483254
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FakeFSDataInputStream.java
@@ -0,0 +1,41 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+
+/** Mock stream for unit tests; all seek and positioned-read operations are no-ops. */
+public class FakeFSDataInputStream
+ extends FilterInputStream implements Seekable, PositionedReadable {
+ public FakeFSDataInputStream(InputStream in) { super(in); }
+ public void seek(long pos) throws IOException { }
+ public long getPos() throws IOException { return -1; }
+ public boolean seekToNewSource(long targetPos) throws IOException {
+ return false;
+ }
+ public int read(long position, byte[] buffer, int offset, int length)
+ throws IOException { return -1; }
+ public void readFully(long position, byte[] buffer, int offset, int length)
+ throws IOException { }
+ public void readFully(long position, byte[] buffer) throws IOException { }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalResourceStatus.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalResourceStatus.java
new file mode 100644
index 0000000..cb2a621
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalResourceStatus.java
@@ -0,0 +1,82 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType;
+
+public class MockLocalResourceStatus implements LocalResourceStatus {
+
+ private LocalResource rsrc = null;
+ private ResourceStatusType tag = null;
+ private URL localPath = null;
+ private long size = -1L;
+ private YarnRemoteException ex = null;
+
+ MockLocalResourceStatus() { }
+ MockLocalResourceStatus(LocalResource rsrc, ResourceStatusType tag,
+ URL localPath, YarnRemoteException ex) {
+ this.rsrc = rsrc;
+ this.tag = tag;
+ this.localPath = localPath;
+ this.ex = ex;
+ }
+
+ @Override
+ public LocalResource getResource() { return rsrc; }
+ @Override
+ public ResourceStatusType getStatus() { return tag; }
+ @Override
+ public long getLocalSize() { return size; }
+ @Override
+ public URL getLocalPath() { return localPath; }
+ @Override
+ public YarnRemoteException getException() { return ex; }
+ @Override
+ public void setResource(LocalResource rsrc) { this.rsrc = rsrc; }
+ @Override
+ public void setStatus(ResourceStatusType tag) { this.tag = tag; }
+ @Override
+ public void setLocalPath(URL localPath) { this.localPath = localPath; }
+ @Override
+ public void setLocalSize(long size) { this.size = size; }
+ @Override
+ public void setException(YarnRemoteException ex) { this.ex = ex; }
+
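+ // Note: equal only when resource and status match AND both localPath and exception are non-null and equal.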
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof MockLocalResourceStatus)) {
+ return false;
+ }
+ MockLocalResourceStatus other = (MockLocalResourceStatus) o;
+ return getResource().equals(other.getResource())
+ && getStatus().equals(other.getStatus())
+ && (null != getLocalPath()
+ && getLocalPath().equals(other.getLocalPath()))
+ && (null != getException()
+ && getException().equals(other.getException()));
+ }
+
+ @Override
+ public int hashCode() {
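+ // arbitrary constant; consistent with equals, but unsuited to hash-based collections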
+ return 4344;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerHeartbeatResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerHeartbeatResponse.java
new file mode 100644
index 0000000..ca47394
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerHeartbeatResponse.java
@@ -0,0 +1,55 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+
+public class MockLocalizerHeartbeatResponse
+ implements LocalizerHeartbeatResponse {
+
+ LocalizerAction action;
+ List<LocalResource> rsrc;
+
+ MockLocalizerHeartbeatResponse() {
+ rsrc = new ArrayList<LocalResource>();
+ }
+
+ MockLocalizerHeartbeatResponse(
+ LocalizerAction action, List<LocalResource> rsrc) {
+ this.action = action;
+ this.rsrc = rsrc;
+ }
+
+ public LocalizerAction getLocalizerAction() { return action; }
+ public List<LocalResource> getAllResources() { return rsrc; }
+ public LocalResource getLocalResource(int i) { return rsrc.get(i); }
+ public void setLocalizerAction(LocalizerAction action) {
+ this.action = action;
+ }
+ public void addAllResources(List<LocalResource> resources) {
+ rsrc.addAll(resources);
+ }
+ public void addResource(LocalResource resource) { rsrc.add(resource); }
+ public void removeResource(int index) { rsrc.remove(index); }
+ public void clearResources() { rsrc.clear(); }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerStatus.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerStatus.java
new file mode 100644
index 0000000..f4e5d23
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerStatus.java
@@ -0,0 +1,80 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+
+public class MockLocalizerStatus implements LocalizerStatus {
+
+ private String locId;
+ private List<LocalResourceStatus> stats;
+
+ public MockLocalizerStatus() {
+ stats = new ArrayList<LocalResourceStatus>();
+ }
+
+ public MockLocalizerStatus(String locId, List<LocalResourceStatus> stats) {
+ this.locId = locId;
+ this.stats = stats;
+ }
+
+ @Override
+ public String getLocalizerId() { return locId; }
+ @Override
+ public List<LocalResourceStatus> getResources() { return stats; }
+ @Override
+ public void setLocalizerId(String id) { this.locId = id; }
+ @Override
+ public void addAllResources(List<LocalResourceStatus> rsrcs) {
+ stats.addAll(rsrcs);
+ }
+ @Override
+ public LocalResourceStatus getResourceStatus(int index) {
+ return stats.get(index);
+ }
+ @Override
+ public void addResourceStatus(LocalResourceStatus resource) {
+ stats.add(resource);
+ }
+ @Override
+ public void removeResource(int index) {
+ stats.remove(index);
+ }
+ public void clearResources() { stats.clear(); }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof MockLocalizerStatus)) {
+ return false;
+ }
+ MockLocalizerStatus other = (MockLocalizerStatus) o;
+ return getLocalizerId().equals(other.getLocalizerId())
+ && getResources().containsAll(other.getResources())
+ && other.getResources().containsAll(getResources());
+ }
+
+ @Override
+ public int hashCode() {
+ return 4344;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
new file mode 100644
index 0000000..e4b68ff
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
@@ -0,0 +1,307 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Matchers.same;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+import org.junit.Test;
+import org.mockito.ArgumentMatcher;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+public class TestContainerLocalizer {
+
+ static final Path basedir =
+ new Path("target", TestContainerLocalizer.class.getName());
+
+ @Test
+ @SuppressWarnings("unchecked") // mocked generics
+ public void testContainerLocalizerMain() throws Exception {
+ Configuration conf = new Configuration();
+ AbstractFileSystem spylfs =
+ spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
+ // don't actually create dirs
+ doNothing().when(spylfs).mkdir(
+ isA(Path.class), isA(FsPermission.class), anyBoolean());
+ FileContext lfs = FileContext.getFileContext(spylfs, conf);
+ final String user = "yak";
+ final String appId = "app_RM_0";
+ final String cId = "container_0";
+ final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 4344);
+ final List<Path> localDirs = new ArrayList<Path>();
+ for (int i = 0; i < 4; ++i) {
+ localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
+ }
+ RecordFactory mockRF = getMockLocalizerRecordFactory();
+ ContainerLocalizer concreteLoc = new ContainerLocalizer(lfs, user,
+ appId, cId, localDirs, mockRF);
+ ContainerLocalizer localizer = spy(concreteLoc);
+
+ // return credential stream instead of opening local file
+ final Random r = new Random();
+ long seed = r.nextLong();
+ r.setSeed(seed);
+ System.out.println("SEED: " + seed);
+ DataInputBuffer appTokens = createFakeCredentials(r, 10);
+ Path tokenPath =
+ lfs.makeQualified(new Path(
+ String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, cId)));
+ doReturn(new FSDataInputStream(new FakeFSDataInputStream(appTokens))
+ ).when(spylfs).open(tokenPath);
+
+ // mock heartbeat responses from NM
+ LocalizationProtocol nmProxy = mock(LocalizationProtocol.class);
+ LocalResource rsrcA = getMockRsrc(r, LocalResourceVisibility.PRIVATE);
+ LocalResource rsrcB = getMockRsrc(r, LocalResourceVisibility.PRIVATE);
+ LocalResource rsrcC = getMockRsrc(r, LocalResourceVisibility.APPLICATION);
+ LocalResource rsrcD = getMockRsrc(r, LocalResourceVisibility.PRIVATE);
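+ // four LIVE responses delivering rsrcA..rsrcD one at a time, then an empty LIVE, then DIE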
+ when(nmProxy.heartbeat(isA(LocalizerStatus.class)))
+ .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
+ Collections.singletonList(rsrcA)))
+ .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
+ Collections.singletonList(rsrcB)))
+ .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
+ Collections.singletonList(rsrcC)))
+ .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
+ Collections.singletonList(rsrcD)))
+ .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
+ Collections.<LocalResource>emptyList()))
+ .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.DIE,
+ null));
+ doReturn(new FakeDownload(rsrcA.getResource().getFile(), true)).when(
+ localizer).download(isA(LocalDirAllocator.class), eq(rsrcA),
+ isA(UserGroupInformation.class));
+ doReturn(new FakeDownload(rsrcB.getResource().getFile(), true)).when(
+ localizer).download(isA(LocalDirAllocator.class), eq(rsrcB),
+ isA(UserGroupInformation.class));
+ doReturn(new FakeDownload(rsrcC.getResource().getFile(), true)).when(
+ localizer).download(isA(LocalDirAllocator.class), eq(rsrcC),
+ isA(UserGroupInformation.class));
+ doReturn(new FakeDownload(rsrcD.getResource().getFile(), true)).when(
+ localizer).download(isA(LocalDirAllocator.class), eq(rsrcD),
+ isA(UserGroupInformation.class));
+ doReturn(nmProxy).when(localizer).getProxy(nmAddr);
+ doNothing().when(localizer).sleep(anyInt());
+
+ // return result instantly for deterministic test
+ ExecutorService syncExec = mock(ExecutorService.class);
+ when(syncExec.submit(isA(Callable.class)))
+ .thenAnswer(new Answer<Future<Path>>() {
+ @Override
+ public Future<Path> answer(InvocationOnMock invoc)
+ throws Throwable {
+ Future<Path> done = mock(Future.class);
+ when(done.isDone()).thenReturn(true);
+ FakeDownload d = (FakeDownload) invoc.getArguments()[0];
+ when(done.get()).thenReturn(d.call());
+ return done;
+ }
+ });
+ doReturn(syncExec).when(localizer).createDownloadThreadPool();
+
+ // run localization
+ assertEquals(0, localizer.runLocalization(nmAddr));
+
+ // verify created cache
+ for (Path p : localDirs) {
+ Path base = new Path(new Path(p, ContainerLocalizer.USERCACHE), user);
+ Path privcache = new Path(base, ContainerLocalizer.FILECACHE);
+ // $x/usercache/$user/filecache
+ verify(spylfs).mkdir(eq(privcache), isA(FsPermission.class), eq(false));
+ Path appDir =
+ new Path(base, new Path(ContainerLocalizer.APPCACHE, appId));
+ // $x/usercache/$user/appcache/$appId/filecache
+ Path appcache = new Path(appDir, ContainerLocalizer.FILECACHE);
+ verify(spylfs).mkdir(eq(appcache), isA(FsPermission.class), eq(false));
+ // $x/usercache/$user/appcache/$appId/output
+ Path appOutput = new Path(appDir, ContainerLocalizer.OUTPUTDIR);
+ verify(spylfs).mkdir(eq(appOutput), isA(FsPermission.class), eq(false));
+ }
+
+ // verify tokens read at expected location
+ verify(spylfs).open(tokenPath);
+
+ // verify downloaded resources reported to NM
+ verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcA)));
+ verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcB)));
+ verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcC)));
+ verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcD)));
+
+ // verify all HB use localizerID provided
+ verify(nmProxy, never()).heartbeat(argThat(
+ new ArgumentMatcher<LocalizerStatus>() {
+ @Override
+ public boolean matches(Object o) {
+ LocalizerStatus status = (LocalizerStatus) o;
+ return !cId.equals(status.getLocalizerId());
+ }
+ }));
+ }
+
+ static class HBMatches extends ArgumentMatcher<LocalizerStatus> {
+ final LocalResource rsrc;
+ HBMatches(LocalResource rsrc) {
+ this.rsrc = rsrc;
+ }
+ @Override
+ public boolean matches(Object o) {
+ LocalizerStatus status = (LocalizerStatus) o;
+ for (LocalResourceStatus localized : status.getResources()) {
+ switch (localized.getStatus()) {
+ case FETCH_SUCCESS:
+ if (localized.getLocalPath().getFile().contains(
+ rsrc.getResource().getFile())) {
+ return true;
+ }
+ break;
+ default:
+ fail("Unexpected: " + localized.getStatus());
+ break;
+ }
+ }
+ return false;
+ }
+ }
+
+ static class FakeDownload implements Callable<Path> {
+ private final Path localPath;
+ private final boolean succeed;
+ FakeDownload(String absPath, boolean succeed) {
+ this.localPath = new Path("file:///localcache" + absPath);
+ this.succeed = succeed;
+ }
+ @Override
+ public Path call() throws IOException {
+ if (!succeed) {
+ throw new IOException("FAIL " + localPath);
+ }
+ return localPath;
+ }
+ }
+
+ static RecordFactory getMockLocalizerRecordFactory() {
+ RecordFactory mockRF = mock(RecordFactory.class);
+ when(mockRF.newRecordInstance(same(LocalResourceStatus.class)))
+ .thenAnswer(new Answer<LocalResourceStatus>() {
+ @Override
+ public LocalResourceStatus answer(InvocationOnMock invoc)
+ throws Throwable {
+ return new MockLocalResourceStatus();
+ }
+ });
+ when(mockRF.newRecordInstance(same(LocalizerStatus.class)))
+ .thenAnswer(new Answer<LocalizerStatus>() {
+ @Override
+ public LocalizerStatus answer(InvocationOnMock invoc)
+ throws Throwable {
+ return new MockLocalizerStatus();
+ }
+ });
+ return mockRF;
+ }
+
+ static LocalResource getMockRsrc(Random r,
+ LocalResourceVisibility vis) {
+ LocalResource rsrc = mock(LocalResource.class);
+
+ String name = Long.toHexString(r.nextLong());
+ URL uri = mock(org.apache.hadoop.yarn.api.records.URL.class);
+ when(uri.getScheme()).thenReturn("file");
+ when(uri.getHost()).thenReturn(null);
+ when(uri.getFile()).thenReturn("/local/" + vis + "/" + name);
+
+ when(rsrc.getResource()).thenReturn(uri);
+ when(rsrc.getSize()).thenReturn(r.nextInt(1024) + 1024L);
+ when(rsrc.getTimestamp()).thenReturn(r.nextInt(1024) + 2048L);
+ when(rsrc.getType()).thenReturn(LocalResourceType.FILE);
+ when(rsrc.getVisibility()).thenReturn(vis);
+
+ return rsrc;
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ static DataInputBuffer createFakeCredentials(Random r, int nTok)
+ throws IOException {
+ Credentials creds = new Credentials();
+ byte[] password = new byte[20];
+ Text kind = new Text();
+ Text service = new Text();
+ Text alias = new Text();
+ for (int i = 0; i < nTok; ++i) {
+ byte[] identifier = ("idef" + i).getBytes();
+ r.nextBytes(password);
+ kind.set("kind" + i);
+ service.set("service" + i);
+ alias.set("token" + i);
+ Token token = new Token(identifier, password, kind, service);
+ creds.addToken(alias, token);
+ }
+ DataOutputBuffer buf = new DataOutputBuffer();
+ creds.writeTokenStorageToStream(buf);
+ DataInputBuffer ret = new DataInputBuffer();
+ ret.reset(buf.getData(), 0, buf.getLength());
+ return ret;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestFSDownload.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestFSDownload.java
new file mode 100644
index 0000000..fda8817
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestFSDownload.java
@@ -0,0 +1,126 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.FSDownload;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+import static org.apache.hadoop.fs.CreateFlag.*;
+
+
+import org.junit.AfterClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestFSDownload {
+
+ @AfterClass
+ public static void deleteTestDir() throws IOException {
+ FileContext fs = FileContext.getLocalFSFileContext();
+ fs.delete(new Path("target", TestFSDownload.class.getSimpleName()), true);
+ }
+
+ static final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ static LocalResource createFile(FileContext files, Path p, int len,
+ Random r) throws IOException, URISyntaxException {
+ FSDataOutputStream out = null;
+ try {
+ byte[] bytes = new byte[len];
+ out = files.create(p, EnumSet.of(CREATE, OVERWRITE));
+ r.nextBytes(bytes);
+ out.write(bytes);
+ } finally {
+ if (out != null) out.close();
+ }
+ LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
+ ret.setResource(ConverterUtils.getYarnUrlFromPath(p));
+ ret.setSize(len);
+ ret.setType(LocalResourceType.FILE);
+ ret.setTimestamp(files.getFileStatus(p).getModificationTime());
+ return ret;
+ }
+
+ @Test
+ public void testDownload() throws IOException, URISyntaxException,
+ InterruptedException {
+ Configuration conf = new Configuration();
+ FileContext files = FileContext.getLocalFSFileContext(conf);
+ final Path basedir = files.makeQualified(new Path("target",
+ TestFSDownload.class.getSimpleName()));
+ files.mkdir(basedir, null, true);
+ conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
+
+ Random rand = new Random();
+ long sharedSeed = rand.nextLong();
+ rand.setSeed(sharedSeed);
+ System.out.println("SEED: " + sharedSeed);
+
+ Map<LocalResource,Future<Path>> pending =
+ new HashMap<LocalResource,Future<Path>>();
+ ExecutorService exec = Executors.newSingleThreadExecutor();
+ LocalDirAllocator dirs =
+ new LocalDirAllocator(TestFSDownload.class.getName());
+ int[] sizes = new int[10];
+ for (int i = 0; i < 10; ++i) {
+ sizes[i] = rand.nextInt(512) + 512;
+ LocalResource rsrc = createFile(files, new Path(basedir, "" + i),
+ sizes[i], rand);
+ FSDownload fsd =
+ new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
+ dirs, rsrc, new Random(sharedSeed));
+ pending.put(rsrc, exec.submit(fsd));
+ }
+
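+ // each future should resolve to the localized path; its name is assumed to keep the source index used in sizes[]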
+ try {
+ for (Map.Entry<LocalResource,Future<Path>> p : pending.entrySet()) {
+ Path localized = p.getValue().get();
+ assertEquals(sizes[Integer.valueOf(localized.getName())], p.getKey()
+ .getSize());
+ }
+ } catch (ExecutionException e) {
+ throw new IOException("Failed exec", e);
+ } finally {
+ exec.shutdown();
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResource.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResource.java
new file mode 100644
index 0000000..edfe7d7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResource.java
@@ -0,0 +1,145 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.net.URISyntaxException;
+import java.util.Random;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+import static org.apache.hadoop.yarn.api.records.LocalResourceType.*;
+import static org.apache.hadoop.yarn.api.records.LocalResourceVisibility.*;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestLocalResource {
+
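+ // A LocalResourceRequest is identified by (path, timestamp, type); size and visibility are ignored.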
+ static org.apache.hadoop.yarn.api.records.LocalResource getYarnResource(Path p, long size,
+ long timestamp, LocalResourceType type, LocalResourceVisibility state)
+ throws URISyntaxException {
+ org.apache.hadoop.yarn.api.records.LocalResource ret =
+ RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
+ org.apache.hadoop.yarn.api.records.LocalResource.class);
+ ret.setResource(ConverterUtils.getYarnUrlFromURI(p.toUri()));
+ ret.setSize(size);
+ ret.setTimestamp(timestamp);
+ ret.setType(type);
+ ret.setVisibility(state);
+ return ret;
+ }
+
+ static void checkEqual(LocalResourceRequest a, LocalResourceRequest b) {
+ assertEquals(a, b);
+ assertEquals(a.hashCode(), b.hashCode());
+ assertEquals(0, a.compareTo(b));
+ assertEquals(0, b.compareTo(a));
+ }
+
+ static void checkNotEqual(LocalResourceRequest a, LocalResourceRequest b) {
+ assertFalse(a.equals(b));
+ assertFalse(b.equals(a));
+ assertFalse(a.hashCode() == b.hashCode());
+ assertFalse(0 == a.compareTo(b));
+ assertFalse(0 == b.compareTo(a));
+ }
+
+ @Test
+ public void testResourceEquality() throws URISyntaxException {
+ Random r = new Random();
+ long seed = r.nextLong();
+ r.setSeed(seed);
+ System.out.println("SEED: " + seed);
+
+ long basetime = r.nextLong() >>> 2;
+ org.apache.hadoop.yarn.api.records.LocalResource yA = getYarnResource(
+ new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC);
+ org.apache.hadoop.yarn.api.records.LocalResource yB = getYarnResource(
+ new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC);
+ final LocalResourceRequest a = new LocalResourceRequest(yA);
+ LocalResourceRequest b = new LocalResourceRequest(yA);
+ checkEqual(a, b);
+ b = new LocalResourceRequest(yB);
+ checkEqual(a, b);
+
+ // ignore visibility
+ yB = getYarnResource(
+ new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PRIVATE);
+ b = new LocalResourceRequest(yB);
+ checkEqual(a, b);
+
+ // ignore size
+ yB = getYarnResource(
+ new Path("http://yak.org:80/foobar"), 0, basetime, FILE, PRIVATE);
+ b = new LocalResourceRequest(yB);
+ checkEqual(a, b);
+
+ // note path
+ yB = getYarnResource(
+ new Path("hdfs://dingo.org:80/foobar"), 0, basetime, ARCHIVE, PUBLIC);
+ b = new LocalResourceRequest(yB);
+ checkNotEqual(a, b);
+
+ // note type
+ yB = getYarnResource(
+ new Path("http://yak.org:80/foobar"), 0, basetime, ARCHIVE, PUBLIC);
+ b = new LocalResourceRequest(yB);
+ checkNotEqual(a, b);
+
+ // note timestamp
+ yB = getYarnResource(
+ new Path("http://yak.org:80/foobar"), 0, basetime + 1, FILE, PUBLIC);
+ b = new LocalResourceRequest(yB);
+ checkNotEqual(a, b);
+ }
+
+ @Test
+ public void testResourceOrder() throws URISyntaxException {
+ Random r = new Random();
+ long seed = r.nextLong();
+ r.setSeed(seed);
+ System.out.println("SEED: " + seed);
+ long basetime = r.nextLong() >>> 2;
+ org.apache.hadoop.yarn.api.records.LocalResource yA = getYarnResource(
+ new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC);
+ final LocalResourceRequest a = new LocalResourceRequest(yA);
+
+ // Path primary
+ org.apache.hadoop.yarn.api.records.LocalResource yB = getYarnResource(
+ new Path("http://yak.org:80/foobaz"), -1, basetime, FILE, PUBLIC);
+ LocalResourceRequest b = new LocalResourceRequest(yB);
+ assertTrue(0 > a.compareTo(b));
+
+ // timestamp secondary
+ yB = getYarnResource(
+ new Path("http://yak.org:80/foobar"), -1, basetime + 1, FILE, PUBLIC);
+ b = new LocalResourceRequest(yB);
+ assertTrue(0 > a.compareTo(b));
+
+ // type tertiary
+ yB = getYarnResource(
+ new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC);
+ b = new LocalResourceRequest(yB);
+ assertTrue(0 != a.compareTo(b)); // order unspecified; only require non-equal
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalizedResource.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalizedResource.java
new file mode 100644
index 0000000..2a9f445
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalizedResource.java
@@ -0,0 +1,244 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceLocalizedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceReleaseEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRequestEvent;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.mockito.ArgumentMatcher;
+import static org.mockito.Mockito.*;
+
+public class TestLocalizedResource {
+
+ static ContainerId getMockContainer(int id) {
+ ApplicationId appId = mock(ApplicationId.class);
+ when(appId.getClusterTimestamp()).thenReturn(314159265L);
+ when(appId.getId()).thenReturn(3);
+ ContainerId container = mock(ContainerId.class);
+ when(container.getId()).thenReturn(id);
+ when(container.getAppId()).thenReturn(appId);
+ return container;
+ }
+
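+ // Exercises the LocalizedResource state machine: a request moves the resource to
+ // DOWNLOADING, releasing every requesting container drops it back to INIT, a
+ // ResourceLocalizedEvent moves it to LOCALIZED, and requests arriving after that
+ // are answered with an immediate RESOURCE_LOCALIZED notification.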
+ @Test
+ @SuppressWarnings("unchecked") // mocked generic
+ public void testNotification() throws Exception {
+ DrainDispatcher dispatcher = new DrainDispatcher();
+ dispatcher.init(null);
+ try {
+ dispatcher.start();
+ EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
+ EventHandler<LocalizerEvent> localizerBus = mock(EventHandler.class);
+ dispatcher.register(ContainerEventType.class, containerBus);
+ dispatcher.register(LocalizerEventType.class, localizerBus);
+
+ // mock resource
+ LocalResource apiRsrc = createMockResource();
+
+ final ContainerId container0 = getMockContainer(0);
+ final Credentials creds0 = new Credentials();
+ final LocalResourceVisibility vis0 = LocalResourceVisibility.PRIVATE;
+ final LocalizerContext ctxt0 =
+ new LocalizerContext("yak", container0, creds0);
+ LocalResourceRequest rsrcA = new LocalResourceRequest(apiRsrc);
+ LocalizedResource local = new LocalizedResource(rsrcA, dispatcher);
+ local.handle(new ResourceRequestEvent(rsrcA, vis0, ctxt0));
+ dispatcher.await();
+
+ // Register C0, verify request event
+ LocalizerEventMatcher matchesL0Req =
+ new LocalizerEventMatcher(container0, creds0, vis0,
+ LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION);
+ verify(localizerBus).handle(argThat(matchesL0Req));
+ assertEquals(ResourceState.DOWNLOADING, local.getState());
+
+ // Register C1, verify request event
+ final Credentials creds1 = new Credentials();
+ final ContainerId container1 = getMockContainer(1);
+ final LocalizerContext ctxt1 =
+ new LocalizerContext("yak", container1, creds1);
+ final LocalResourceVisibility vis1 = LocalResourceVisibility.PUBLIC;
+ local.handle(new ResourceRequestEvent(rsrcA, vis1, ctxt1));
+ dispatcher.await();
+ LocalizerEventMatcher matchesL1Req =
+ new LocalizerEventMatcher(container1, creds1, vis1,
+ LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION);
+ verify(localizerBus).handle(argThat(matchesL1Req));
+
+ // Release C0 container localization, verify no notification
+ local.handle(new ResourceReleaseEvent(rsrcA, container0));
+ dispatcher.await();
+ verify(containerBus, never()).handle(isA(ContainerEvent.class));
+ assertEquals(ResourceState.DOWNLOADING, local.getState());
+
+ // Release C1 container localization, verify no notification
+ local.handle(new ResourceReleaseEvent(rsrcA, container1));
+ dispatcher.await();
+ verify(containerBus, never()).handle(isA(ContainerEvent.class));
+ assertEquals(ResourceState.INIT, local.getState());
+
+ // Register C2, C3
+ final ContainerId container2 = getMockContainer(2);
+ final LocalResourceVisibility vis2 = LocalResourceVisibility.PRIVATE;
+ final Credentials creds2 = new Credentials();
+ final LocalizerContext ctxt2 =
+ new LocalizerContext("yak", container2, creds2);
+
+ final ContainerId container3 = getMockContainer(3);
+ final LocalResourceVisibility vis3 = LocalResourceVisibility.PRIVATE;
+ final Credentials creds3 = new Credentials();
+ final LocalizerContext ctxt3 =
+ new LocalizerContext("yak", container3, creds3);
+
+ local.handle(new ResourceRequestEvent(rsrcA, vis2, ctxt2));
+ local.handle(new ResourceRequestEvent(rsrcA, vis3, ctxt3));
+ dispatcher.await();
+ LocalizerEventMatcher matchesL2Req =
+ new LocalizerEventMatcher(container2, creds2, vis2,
+ LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION);
+ verify(localizerBus).handle(argThat(matchesL2Req));
+ LocalizerEventMatcher matchesL3Req =
+ new LocalizerEventMatcher(container3, creds3, vis3,
+ LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION);
+ verify(localizerBus).handle(argThat(matchesL3Req));
+
+ // Successful localization. verify notification C2, C3
+ Path locA = new Path("file:///cache/rsrcA");
+ local.handle(new ResourceLocalizedEvent(rsrcA, locA, 10));
+ dispatcher.await();
+ ContainerEventMatcher matchesC2Localized =
+ new ContainerEventMatcher(container2,
+ ContainerEventType.RESOURCE_LOCALIZED);
+ ContainerEventMatcher matchesC3Localized =
+ new ContainerEventMatcher(container3,
+ ContainerEventType.RESOURCE_LOCALIZED);
+ verify(containerBus).handle(argThat(matchesC2Localized));
+ verify(containerBus).handle(argThat(matchesC3Localized));
+ assertEquals(ResourceState.LOCALIZED, local.getState());
+
+ // Register C4, verify notification
+ final ContainerId container4 = getMockContainer(4);
+ final Credentials creds4 = new Credentials();
+ final LocalizerContext ctxt4 =
+ new LocalizerContext("yak", container4, creds4);
+ final LocalResourceVisibility vis4 = LocalResourceVisibility.PRIVATE;
+ local.handle(new ResourceRequestEvent(rsrcA, vis4, ctxt4));
+ dispatcher.await();
+ ContainerEventMatcher matchesC4Localized =
+ new ContainerEventMatcher(container4,
+ ContainerEventType.RESOURCE_LOCALIZED);
+ verify(containerBus).handle(argThat(matchesC4Localized));
+ assertEquals(ResourceState.LOCALIZED, local.getState());
+ } finally {
+ dispatcher.stop();
+ }
+ }
+
+ @Test
+ public void testDirectLocalization() throws Exception {
+ DrainDispatcher dispatcher = new DrainDispatcher();
+ dispatcher.init(null);
+ try {
+ dispatcher.start();
+ LocalResource apiRsrc = createMockResource();
+ LocalResourceRequest rsrcA = new LocalResourceRequest(apiRsrc);
+ LocalizedResource local = new LocalizedResource(rsrcA, dispatcher);
+ Path p = new Path("file:///cache/rsrcA");
+ local.handle(new ResourceLocalizedEvent(rsrcA, p, 10));
+ dispatcher.await();
+ assertEquals(ResourceState.LOCALIZED, local.getState());
+ } finally {
+ dispatcher.stop();
+ }
+ }
+
+ static LocalResource createMockResource() {
+ // mock rsrc location
+ org.apache.hadoop.yarn.api.records.URL uriA =
+ mock(org.apache.hadoop.yarn.api.records.URL.class);
+ when(uriA.getScheme()).thenReturn("file");
+ when(uriA.getHost()).thenReturn(null);
+ when(uriA.getFile()).thenReturn("/localA/rsrc");
+
+ LocalResource apiRsrc = mock(LocalResource.class);
+ when(apiRsrc.getResource()).thenReturn(uriA);
+ when(apiRsrc.getTimestamp()).thenReturn(4344L);
+ when(apiRsrc.getType()).thenReturn(LocalResourceType.FILE);
+ return apiRsrc;
+ }
+
+
+ static class LocalizerEventMatcher extends ArgumentMatcher<LocalizerEvent> {
+ Credentials creds;
+ LocalResourceVisibility vis;
+ private final ContainerId idRef;
+ private final LocalizerEventType type;
+
+ public LocalizerEventMatcher(ContainerId idRef, Credentials creds,
+ LocalResourceVisibility vis, LocalizerEventType type) {
+ this.vis = vis;
+ this.type = type;
+ this.creds = creds;
+ this.idRef = idRef;
+ }
+ @Override
+ public boolean matches(Object o) {
+ if (!(o instanceof LocalizerResourceRequestEvent)) return false;
+ LocalizerResourceRequestEvent evt = (LocalizerResourceRequestEvent) o;
+ return idRef == evt.getContext().getContainerId()
+ && type == evt.getType()
+ && vis == evt.getVisibility()
+ && creds == evt.getContext().getCredentials();
+ }
+ }
+
+ static class ContainerEventMatcher extends ArgumentMatcher<ContainerEvent> {
+ private final ContainerId idRef;
+ private final ContainerEventType type;
+ public ContainerEventMatcher(ContainerId idRef, ContainerEventType type) {
+ this.idRef = idRef;
+ this.type = type;
+ }
+ @Override
+ public boolean matches(Object o) {
+ if (!(o instanceof ContainerEvent)) return false;
+ ContainerEvent evt = (ContainerEvent) o;
+ return idRef == evt.getContainerID() && type == evt.getType();
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
new file mode 100644
index 0000000..7a18fec
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -0,0 +1,290 @@
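+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/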
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.net.InetSocketAddress;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.avro.ipc.Server;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
+import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.mockito.ArgumentMatcher;
+import static org.mockito.Mockito.*;
+
+import static org.apache.hadoop.yarn.server.nodemanager.NMConfig.NM_LOCAL_DIR;
+
+public class TestResourceLocalizationService {
+
+ static final Path basedir =
+ new Path("target", TestResourceLocalizationService.class.getName());
+
+ @Test
+ public void testLocalizationInit() throws Exception {
+ final Configuration conf = new Configuration();
+ AsyncDispatcher dispatcher = new AsyncDispatcher();
+ dispatcher.init(null);
+
+ ContainerExecutor exec = mock(ContainerExecutor.class);
+ DeletionService delService = spy(new DeletionService(exec));
+ delService.init(null);
+ delService.start();
+
+ AbstractFileSystem spylfs =
+ spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
+ FileContext lfs = FileContext.getFileContext(spylfs, conf);
+ doNothing().when(spylfs).mkdir(
+ isA(Path.class), isA(FsPermission.class), anyBoolean());
+
+ ResourceLocalizationService locService =
+ spy(new ResourceLocalizationService(dispatcher, exec, delService));
+ doReturn(lfs)
+ .when(locService).getLocalFileContext(isA(Configuration.class));
+ try {
+ dispatcher.start();
+ List<Path> localDirs = new ArrayList<Path>();
+ String[] sDirs = new String[4];
+ for (int i = 0; i < 4; ++i) {
+ localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
+ sDirs[i] = localDirs.get(i).toString();
+ }
+ conf.setStrings(NM_LOCAL_DIR, sDirs);
+
+ // initialize ResourceLocalizationService
+ locService.init(conf);
+
+ // verify directory creation
+ for (Path p : localDirs) {
+ Path usercache = new Path(p, ContainerLocalizer.USERCACHE);
+ verify(spylfs)
+ .mkdir(eq(usercache), isA(FsPermission.class), eq(true));
+ Path publicCache = new Path(p, ContainerLocalizer.FILECACHE);
+ verify(spylfs)
+ .mkdir(eq(publicCache), isA(FsPermission.class), eq(true));
+ Path nmPriv = new Path(p, ResourceLocalizationService.NM_PRIVATE_DIR);
+ verify(spylfs).mkdir(eq(nmPriv),
+ eq(ResourceLocalizationService.NM_PRIVATE_PERM), eq(true));
+ }
+ } finally {
+ dispatcher.stop();
+ delService.stop();
+ }
+ }
+
+ @Test
+ @SuppressWarnings("unchecked") // mocked generics
+ public void testLocalizationHeartbeat() throws Exception {
+ Configuration conf = new Configuration();
+ AbstractFileSystem spylfs =
+ spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
+ final FileContext lfs = FileContext.getFileContext(spylfs, conf);
+ doNothing().when(spylfs).mkdir(
+ isA(Path.class), isA(FsPermission.class), anyBoolean());
+
+ List<Path> localDirs = new ArrayList<Path>();
+ String[] sDirs = new String[4];
+ for (int i = 0; i < 4; ++i) {
+ localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
+ sDirs[i] = localDirs.get(i).toString();
+ }
+ conf.setStrings(NM_LOCAL_DIR, sDirs);
+
+ Server ignore = mock(Server.class);
+ DrainDispatcher dispatcher = new DrainDispatcher();
+ dispatcher.init(conf);
+ dispatcher.start();
+ EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
+ dispatcher.register(ApplicationEventType.class, applicationBus);
+ EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
+ dispatcher.register(ContainerEventType.class, containerBus);
+
+ ContainerExecutor exec = mock(ContainerExecutor.class);
+ DeletionService delService = new DeletionService(exec);
+ delService.init(null);
+ delService.start();
+
+ ResourceLocalizationService rawService =
+ new ResourceLocalizationService(dispatcher, exec, delService);
+ ResourceLocalizationService spyService = spy(rawService);
+ doReturn(ignore).when(spyService).createServer();
+ doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
+ try {
+ spyService.init(conf);
+ spyService.start();
+
+ // init application
+ final Application app = mock(Application.class);
+ final ApplicationId appId = mock(ApplicationId.class);
+ when(appId.getClusterTimestamp()).thenReturn(314159265358979L);
+ when(appId.getId()).thenReturn(3);
+ when(app.getUser()).thenReturn("user0");
+ when(app.getAppId()).thenReturn(appId);
+ spyService.handle(new ApplicationLocalizationEvent(
+ LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
+ ArgumentMatcher<ApplicationEvent> matchesAppInit =
+ new ArgumentMatcher<ApplicationEvent>() {
+ @Override
+ public boolean matches(Object o) {
+ ApplicationEvent evt = (ApplicationEvent) o;
+ return evt.getType() == ApplicationEventType.APPLICATION_INITED
+ && appId == evt.getApplicationID();
+ }
+ };
+ dispatcher.await();
+ verify(applicationBus).handle(argThat(matchesAppInit));
+
+ // init container rsrc, localizer
+ Random r = new Random();
+ long seed = r.nextLong();
+ System.out.println("SEED: " + seed);
+ r.setSeed(seed);
+ final Container c = getMockContainer(appId, 42);
+ FSDataOutputStream out =
+ new FSDataOutputStream(new DataOutputBuffer(), null);
+ doReturn(out).when(spylfs).createInternal(isA(Path.class),
+ isA(EnumSet.class), isA(FsPermission.class), anyInt(), anyShort(),
+ anyLong(), isA(Progressable.class), anyInt(), anyBoolean());
+ final LocalResource resource = getMockResource(r);
+ final LocalResourceRequest req = new LocalResourceRequest(resource);
+ spyService.handle(new ContainerLocalizationRequestEvent(
+ c, Collections.singletonList(req),
+ LocalResourceVisibility.PRIVATE));
+ // Sigh. Thread init of private localizer not accessible
+ Thread.sleep(500);
+ dispatcher.await();
+ String appStr = ConverterUtils.toString(appId);
+ String ctnrStr = ConverterUtils.toString(c.getContainerID());
+ verify(exec).startLocalizer(isA(Path.class), isA(InetSocketAddress.class),
+ eq("user0"), eq(appStr), eq(ctnrStr), isA(List.class));
+
+ // heartbeat from localizer
+ LocalResourceStatus rsrcStat = mock(LocalResourceStatus.class);
+ LocalizerStatus stat = mock(LocalizerStatus.class);
+ when(stat.getLocalizerId()).thenReturn(ctnrStr);
+ when(rsrcStat.getResource()).thenReturn(resource);
+ when(rsrcStat.getLocalSize()).thenReturn(4344L);
+ URL locPath = getPath("/cache/private/blah");
+ when(rsrcStat.getLocalPath()).thenReturn(locPath);
+ when(rsrcStat.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS);
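+ // first heartbeat reports nothing in flight, the second reports the fetched resource, the third is empty again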
+ when(stat.getResources())
+ .thenReturn(Collections.<LocalResourceStatus>emptyList())
+ .thenReturn(Collections.singletonList(rsrcStat))
+ .thenReturn(Collections.<LocalResourceStatus>emptyList());
+
+ // get rsrc
+ LocalizerHeartbeatResponse response = spyService.heartbeat(stat);
+ assertEquals(LocalizerAction.LIVE, response.getLocalizerAction());
+ assertEquals(req, new LocalResourceRequest(response.getLocalResource(0)));
+
+ // empty rsrc
+ response = spyService.heartbeat(stat);
+ assertEquals(LocalizerAction.LIVE, response.getLocalizerAction());
+ assertEquals(0, response.getAllResources().size());
+
+ // get shutdown
+ response = spyService.heartbeat(stat);
+ assertEquals(LocalizerAction.DIE, response.getLocalizerAction());
+
+ // verify container notification
+ ArgumentMatcher<ContainerEvent> matchesContainerLoc =
+ new ArgumentMatcher<ContainerEvent>() {
+ @Override
+ public boolean matches(Object o) {
+ ContainerEvent evt = (ContainerEvent) o;
+ return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED
+ && c.getContainerID() == evt.getContainerID();
+ }
+ };
+ dispatcher.await();
+ verify(containerBus).handle(argThat(matchesContainerLoc));
+ } finally {
+ delService.stop();
+ dispatcher.stop();
+ spyService.stop();
+ }
+ }
+
+ static URL getPath(String path) {
+ URL uri = mock(org.apache.hadoop.yarn.api.records.URL.class);
+ when(uri.getScheme()).thenReturn("file");
+ when(uri.getHost()).thenReturn(null);
+ when(uri.getFile()).thenReturn(path);
+ return uri;
+ }
+
+ static LocalResource getMockResource(Random r) {
+ LocalResource rsrc = mock(LocalResource.class);
+
+ String name = Long.toHexString(r.nextLong());
+ URL uri = getPath("/local/PRIVATE/" + name);
+
+ when(rsrc.getResource()).thenReturn(uri);
+ when(rsrc.getSize()).thenReturn(r.nextInt(1024) + 1024L);
+ when(rsrc.getTimestamp()).thenReturn(r.nextInt(1024) + 2048L);
+ when(rsrc.getType()).thenReturn(LocalResourceType.FILE);
+ when(rsrc.getVisibility()).thenReturn(LocalResourceVisibility.PRIVATE);
+ return rsrc;
+ }
+
+ static Container getMockContainer(ApplicationId appId, int id) {
+ Container c = mock(Container.class);
+ ContainerId cId = mock(ContainerId.class);
+ when(cId.getAppId()).thenReturn(appId);
+ when(cId.getId()).thenReturn(id);
+ when(c.getUser()).thenReturn("user0");
+ when(c.getContainerID()).thenReturn(cId);
+ Credentials creds = new Credentials();
+ creds.addToken(new Text("tok" + id), getToken(id));
+ when(c.getCredentials()).thenReturn(creds);
+ return c;
+ }
+
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ static Token<? extends TokenIdentifier> getToken(int id) {
+ return new Token(("ident" + id).getBytes(), ("passwd" + id).getBytes(),
+ new Text("kind" + id), new Text("service" + id));
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
new file mode 100644
index 0000000..64eec61
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
@@ -0,0 +1,83 @@
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.mockito.ArgumentCaptor;
+import static org.mockito.Mockito.*;
+
+public class TestResourceRetention {
+
+ @Test
+ public void testRsrcUnused() {
+ DeletionService delService = mock(DeletionService.class);
+    long TARGET_MB = 10 << 20; // 10MB retention target, expressed in bytes
+ ResourceRetentionSet rss = new ResourceRetentionSet(delService, TARGET_MB);
+ // 3MB files @{10, 15}
+ LocalResourcesTracker pubTracker =
+ createMockTracker(null, 3 * 1024 * 1024, 2, 10, 5);
+ // 1MB files @{3, 6, 9, 12}
+ LocalResourcesTracker trackerA =
+ createMockTracker("A", 1 * 1024 * 1024, 4, 3, 3);
+    // 4MB file @{10}
+ LocalResourcesTracker trackerB =
+ createMockTracker("B", 4 * 1024 * 1024, 1, 10, 5);
+ // 2MB files @{7, 9, 11}
+ LocalResourcesTracker trackerC =
+ createMockTracker("C", 2 * 1024 * 1024, 3, 7, 2);
+ // Total cache: 20MB; verify removed at least 10MB
+ rss.addResources(pubTracker);
+ rss.addResources(trackerA);
+ rss.addResources(trackerB);
+ rss.addResources(trackerC);
+ long deleted = 0L;
+ ArgumentCaptor<LocalizedResource> captor =
+ ArgumentCaptor.forClass(LocalizedResource.class);
+ verify(pubTracker, atMost(2))
+ .remove(captor.capture(), isA(DeletionService.class));
+ verify(trackerA, atMost(4))
+ .remove(captor.capture(), isA(DeletionService.class));
+ verify(trackerB, atMost(1))
+ .remove(captor.capture(), isA(DeletionService.class));
+ verify(trackerC, atMost(3))
+ .remove(captor.capture(), isA(DeletionService.class));
+ for (LocalizedResource rem : captor.getAllValues()) {
+ deleted += rem.getSize();
+ }
+ assertTrue(deleted >= 10 * 1024 * 1024);
+ assertTrue(deleted < 15 * 1024 * 1024);
+ }
+
+ LocalResourcesTracker createMockTracker(String user, final long rsrcSize,
+ long nRsrcs, long timestamp, long tsstep) {
+ ConcurrentMap<LocalResourceRequest,LocalizedResource> trackerResources =
+ new ConcurrentHashMap<LocalResourceRequest,LocalizedResource>();
+ LocalResourcesTracker ret = spy(new LocalResourcesTrackerImpl(user, null,
+ trackerResources));
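+    // Seed the tracker with unreferenced, LOCALIZED resources so every
+    // entry is an eviction candidate.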
+ for (int i = 0; i < nRsrcs; ++i) {
+ final LocalResourceRequest req = new LocalResourceRequest(
+ new Path("file:///" + user + "/rsrc" + i), timestamp + i * tsstep,
+ LocalResourceType.FILE);
+ final long ts = timestamp + i * tsstep;
+ final Path p = new Path("file:///local/" + user + "/rsrc" + i);
+ LocalizedResource rsrc = new LocalizedResource(req, null) {
+ @Override public int getRefCount() { return 0; }
+ @Override public long getSize() { return rsrcSize; }
+ @Override public Path getLocalPath() { return p; }
+ @Override public long getTimestamp() { return ts; }
+ @Override
+ public ResourceState getState() { return ResourceState.LOCALIZED; }
+ };
+ trackerResources.put(req, rsrc);
+ }
+ return ret;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
new file mode 100644
index 0000000..86176e9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -0,0 +1,415 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
+
+import java.io.DataInputStream;
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.Writer;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat.LogKey;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat.LogReader;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorAppFinishedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorAppStartedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorContainerFinishedEvent;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.junit.Ignore;
+import org.junit.Test;
+
+@Ignore
+public class TestLogAggregationService extends BaseContainerManagerTest {
+
+ static {
+ LOG = LogFactory.getLog(TestLogAggregationService.class);
+ }
+
+ private static RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ private File remoteRootLogDir = new File("target", this.getClass()
+ .getName() + "-remoteLogDir");
+
+ public TestLogAggregationService() throws UnsupportedFileSystemException {
+ super();
+ this.remoteRootLogDir.mkdir();
+ }
+
+ @Override
+ public void tearDown() throws IOException, InterruptedException {
+ super.tearDown();
+ createContainerExecutor().deleteAsUser(user,
+ new Path(remoteRootLogDir.getAbsolutePath()), new Path[] {});
+ }
+
+ @Test
+ public void testLocalFileDeletionAfterUpload() throws IOException {
+ this.delSrvc = new DeletionService(createContainerExecutor());
+ this.delSrvc.init(conf);
+ this.conf.set(NMConfig.NM_LOG_DIR, localLogDir.getAbsolutePath());
+ this.conf.set(NMConfig.REMOTE_USER_LOG_DIR,
+ this.remoteRootLogDir.getAbsolutePath());
+ LogAggregationService logAggregationService =
+ new LogAggregationService(this.delSrvc);
+ logAggregationService.init(this.conf);
+ logAggregationService.start();
+
+ ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
+
+ // AppLogDir should be created
+ File app1LogDir =
+ new File(localLogDir, ConverterUtils.toString(application1));
+ app1LogDir.mkdir();
+ logAggregationService
+ .handle(new LogAggregatorAppStartedEvent(
+ application1, this.user, null,
+ ContainerLogsRetentionPolicy.ALL_CONTAINERS));
+
+ ContainerId container11 =
+ BuilderUtils.newContainerId(recordFactory, application1, 1);
+ // Simulate log-file creation
+ writeContainerLogs(app1LogDir, container11);
+ logAggregationService.handle(new LogAggregatorContainerFinishedEvent(
+ container11, "0"));
+
+ logAggregationService.handle(new LogAggregatorAppFinishedEvent(
+ application1));
+
+ logAggregationService.stop();
+
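+    // After the service stops, the local per-container logs must be deleted
+    // and the aggregated remote log file must exist.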
+ String containerIdStr = ConverterUtils.toString(container11);
+ File containerLogDir = new File(app1LogDir, containerIdStr);
+ for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
+ Assert.assertFalse(new File(containerLogDir, fileType).exists());
+ }
+
+ Assert.assertFalse(app1LogDir.exists());
+
+ Assert.assertTrue(new File(logAggregationService
+ .getRemoteNodeLogFileForApp(application1).toUri().getPath()).exists());
+ }
+
+ @Test
+ public void testNoContainerOnNode() {
+ this.conf.set(NMConfig.NM_LOG_DIR, localLogDir.getAbsolutePath());
+ this.conf.set(NMConfig.REMOTE_USER_LOG_DIR,
+ this.remoteRootLogDir.getAbsolutePath());
+ LogAggregationService logAggregationService =
+ new LogAggregationService(this.delSrvc);
+ logAggregationService.init(this.conf);
+ logAggregationService.start();
+
+ ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
+
+ // AppLogDir should be created
+ File app1LogDir =
+ new File(localLogDir, ConverterUtils.toString(application1));
+ app1LogDir.mkdir();
+ logAggregationService
+ .handle(new LogAggregatorAppStartedEvent(
+ application1, this.user, null,
+ ContainerLogsRetentionPolicy.ALL_CONTAINERS));
+
+ logAggregationService.handle(new LogAggregatorAppFinishedEvent(
+ application1));
+
+ logAggregationService.stop();
+
+ Assert
+ .assertFalse(new File(logAggregationService
+ .getRemoteNodeLogFileForApp(application1).toUri().getPath())
+ .exists());
+ }
+
+ @Test
+ public void testMultipleAppsLogAggregation() throws IOException {
+
+ this.conf.set(NMConfig.NM_LOG_DIR, localLogDir.getAbsolutePath());
+ this.conf.set(NMConfig.REMOTE_USER_LOG_DIR,
+ this.remoteRootLogDir.getAbsolutePath());
+ LogAggregationService logAggregationService =
+ new LogAggregationService(this.delSrvc);
+ logAggregationService.init(this.conf);
+ logAggregationService.start();
+
+ ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
+
+ // AppLogDir should be created
+ File app1LogDir =
+ new File(localLogDir, ConverterUtils.toString(application1));
+ app1LogDir.mkdir();
+ logAggregationService
+ .handle(new LogAggregatorAppStartedEvent(
+ application1, this.user, null,
+ ContainerLogsRetentionPolicy.ALL_CONTAINERS));
+
+ ContainerId container11 =
+ BuilderUtils.newContainerId(recordFactory, application1, 1);
+ // Simulate log-file creation
+ writeContainerLogs(app1LogDir, container11);
+ logAggregationService.handle(new LogAggregatorContainerFinishedEvent(
+ container11, "0"));
+
+ ApplicationId application2 = BuilderUtils.newApplicationId(1234, 2);
+
+ File app2LogDir =
+ new File(localLogDir, ConverterUtils.toString(application2));
+ app2LogDir.mkdir();
+ logAggregationService.handle(new LogAggregatorAppStartedEvent(
+ application2, this.user, null,
+ ContainerLogsRetentionPolicy.APPLICATION_MASTER_ONLY));
+
+ ContainerId container21 =
+ BuilderUtils.newContainerId(recordFactory, application2, 1);
+ writeContainerLogs(app2LogDir, container21);
+ logAggregationService.handle(new LogAggregatorContainerFinishedEvent(
+ container21, "0"));
+
+ ContainerId container12 =
+ BuilderUtils.newContainerId(recordFactory, application1, 2);
+ writeContainerLogs(app1LogDir, container12);
+ logAggregationService.handle(new LogAggregatorContainerFinishedEvent(
+ container12, "0"));
+
+ ApplicationId application3 = BuilderUtils.newApplicationId(1234, 3);
+
+ File app3LogDir =
+ new File(localLogDir, ConverterUtils.toString(application3));
+ app3LogDir.mkdir();
+ logAggregationService.handle(new LogAggregatorAppStartedEvent(
+ application3, this.user, null,
+ ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY));
+
+ ContainerId container31 =
+ BuilderUtils.newContainerId(recordFactory, application3, 1);
+ writeContainerLogs(app3LogDir, container31);
+ logAggregationService.handle(new LogAggregatorContainerFinishedEvent(
+ container31, "0"));
+
+ ContainerId container32 =
+ BuilderUtils.newContainerId(recordFactory, application3, 2);
+ writeContainerLogs(app3LogDir, container32);
+ logAggregationService.handle(new LogAggregatorContainerFinishedEvent(
+ container32, "1")); // Failed container
+
+ ContainerId container22 =
+ BuilderUtils.newContainerId(recordFactory, application2, 2);
+ writeContainerLogs(app2LogDir, container22);
+ logAggregationService.handle(new LogAggregatorContainerFinishedEvent(
+ container22, "0"));
+
+ ContainerId container33 =
+ BuilderUtils.newContainerId(recordFactory, application3, 3);
+ writeContainerLogs(app3LogDir, container33);
+ logAggregationService.handle(new LogAggregatorContainerFinishedEvent(
+ container33, "0"));
+
+ logAggregationService.handle(new LogAggregatorAppFinishedEvent(
+ application2));
+ logAggregationService.handle(new LogAggregatorAppFinishedEvent(
+ application3));
+ logAggregationService.handle(new LogAggregatorAppFinishedEvent(
+ application1));
+
+ logAggregationService.stop();
+
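+    // The retention policies decide what was aggregated: app1 keeps all
+    // containers, app2 only the AM (container21), app3 the AM plus the
+    // failed container32.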
+ verifyContainerLogs(logAggregationService, application1,
+ new ContainerId[] { container11, container12 });
+ verifyContainerLogs(logAggregationService, application2,
+ new ContainerId[] { container21 });
+ verifyContainerLogs(logAggregationService, application3,
+ new ContainerId[] { container31, container32 });
+ }
+
+ private void writeContainerLogs(File appLogDir, ContainerId containerId)
+ throws IOException {
+ // ContainerLogDir should be created
+ String containerStr = ConverterUtils.toString(containerId);
+ File containerLogDir = new File(appLogDir, containerStr);
+ containerLogDir.mkdir();
+ for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
+      Writer writer = new FileWriter(new File(containerLogDir, fileType));
+      writer.write(containerStr + " Hello " + fileType + "!");
+      writer.close();
+ }
+ }
+
+ private void verifyContainerLogs(
+ LogAggregationService logAggregationService, ApplicationId appId,
+ ContainerId[] expectedContainerIds) throws IOException {
+ AggregatedLogFormat.LogReader reader =
+ new AggregatedLogFormat.LogReader(this.conf,
+ logAggregationService.getRemoteNodeLogFileForApp(appId));
+ try {
+ Map<String, Map<String, String>> logMap =
+ new HashMap<String, Map<String, String>>();
+ DataInputStream valueStream;
+
+ LogKey key = new LogKey();
+ valueStream = reader.next(key);
+
+ while (valueStream != null) {
+ LOG.info("Found container " + key.toString());
+ Map<String, String> perContainerMap = new HashMap<String, String>();
+ logMap.put(key.toString(), perContainerMap);
+
+ while (true) {
+ try {
+ DataOutputBuffer dob = new DataOutputBuffer();
+ LogReader.readAContainerLogsForALogType(valueStream, dob);
+
+ DataInputBuffer dib = new DataInputBuffer();
+ dib.reset(dob.getData(), dob.getLength());
+
+ Assert.assertEquals("\nLogType:", dib.readUTF());
+ String fileType = dib.readUTF();
+
+ Assert.assertEquals("\nLogLength:", dib.readUTF());
+ String fileLengthStr = dib.readUTF();
+ long fileLength = Long.parseLong(fileLengthStr);
+
+ Assert.assertEquals("\nLog Contents:\n", dib.readUTF());
+            byte[] buf = new byte[(int) fileLength]; // the cast is safe in this test
+            dib.read(buf, 0, (int) fileLength);
+ perContainerMap.put(fileType, new String(buf));
+
+ LOG.info("LogType:" + fileType);
+ LOG.info("LogType:" + fileLength);
+ LOG.info("Log Contents:\n" + perContainerMap.get(fileType));
+ } catch (EOFException eof) {
+ break;
+ }
+ }
+
+ // Next container
+ key = new LogKey();
+ valueStream = reader.next(key);
+ }
+
+ // 1 for each container
+ Assert.assertEquals(expectedContainerIds.length, logMap.size());
+ for (ContainerId cId : expectedContainerIds) {
+ String containerStr = ConverterUtils.toString(cId);
+ Map<String, String> thisContainerMap = logMap.remove(containerStr);
+ Assert.assertEquals(3, thisContainerMap.size());
+ for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
+ String expectedValue = containerStr + " Hello " + fileType + "!";
+ LOG.info("Expected log-content : " + new String(expectedValue));
+ String foundValue = thisContainerMap.remove(fileType);
+ Assert.assertNotNull(cId + " " + fileType
+ + " not present in aggregated log-file!", foundValue);
+ Assert.assertEquals(expectedValue, foundValue);
+ }
+ Assert.assertEquals(0, thisContainerMap.size());
+ }
+ Assert.assertEquals(0, logMap.size());
+ } finally {
+ reader.close();
+ }
+ }
+
+ @Test
+ public void testLogAggregationForRealContainerLaunch() throws IOException,
+ InterruptedException {
+
+ this.containerManager.start();
+
+ File scriptFile = new File(tmpDir, "scriptFile.sh");
+ PrintWriter fileWriter = new PrintWriter(scriptFile);
+ fileWriter.write("\necho Hello World! Stdout! > "
+ + new File(localLogDir, "stdout"));
+ fileWriter.write("\necho Hello World! Stderr! > "
+ + new File(localLogDir, "stderr"));
+ fileWriter.write("\necho Hello World! Syslog! > "
+ + new File(localLogDir, "syslog"));
+ fileWriter.close();
+
+ ContainerLaunchContext containerLaunchContext =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ // ////// Construct the Container-id
+ ApplicationId appId =
+ recordFactory.newRecordInstance(ApplicationId.class);
+ ContainerId cId = recordFactory.newRecordInstance(ContainerId.class);
+ cId.setAppId(appId);
+ containerLaunchContext.setContainerId(cId);
+
+ containerLaunchContext.setUser(this.user);
+
+ URL resource_alpha =
+ ConverterUtils.getYarnUrlFromPath(localFS
+ .makeQualified(new Path(scriptFile.getAbsolutePath())));
+ LocalResource rsrc_alpha =
+ recordFactory.newRecordInstance(LocalResource.class);
+ rsrc_alpha.setResource(resource_alpha);
+ rsrc_alpha.setSize(-1);
+ rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
+ rsrc_alpha.setType(LocalResourceType.FILE);
+ rsrc_alpha.setTimestamp(scriptFile.lastModified());
+ String destinationFile = "dest_file";
+ containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha);
+ containerLaunchContext.addCommand("/bin/bash");
+ containerLaunchContext.addCommand(scriptFile.getAbsolutePath());
+ containerLaunchContext.setResource(recordFactory
+ .newRecordInstance(Resource.class));
+ containerLaunchContext.getResource().setMemory(100 * 1024 * 1024);
+ StartContainerRequest startRequest =
+ recordFactory.newRecordInstance(StartContainerRequest.class);
+ startRequest.setContainerLaunchContext(containerLaunchContext);
+ this.containerManager.startContainer(startRequest);
+
+ BaseContainerManagerTest.waitForContainerState(this.containerManager,
+ cId, ContainerState.COMPLETE);
+
+ this.containerManager.handle(new CMgrCompletedAppsEvent(Arrays
+ .asList(appId)));
+ this.containerManager.stop();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
new file mode 100644
index 0000000..a414fdfe
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
@@ -0,0 +1,266 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.regex.Pattern;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin;
+import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+import org.apache.hadoop.yarn.util.TestProcfsBasedProcessTree;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestContainersMonitor extends BaseContainerManagerTest {
+
+ public TestContainersMonitor() throws UnsupportedFileSystemException {
+ super();
+ }
+
+ static {
+ LOG = LogFactory.getLog(TestContainersMonitor.class);
+ }
+ @Before
+ public void setup() throws IOException {
+ conf.setClass(
+ ContainersMonitorImpl.RESOURCE_CALCULATOR_PLUGIN_CONFIG_KEY,
+ LinuxResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+ super.setup();
+ }
+
+ /**
+ * Test to verify the check for whether a process tree is over limit or not.
+ *
+ * @throws IOException
+ * if there was a problem setting up the fake procfs directories or
+ * files.
+ */
+ @Test
+ public void testProcessTreeLimits() throws IOException {
+
+ // set up a dummy proc file system
+ File procfsRootDir = new File(localDir, "proc");
+ String[] pids = { "100", "200", "300", "400", "500", "600", "700" };
+ try {
+ TestProcfsBasedProcessTree.setupProcfsRootDir(procfsRootDir);
+
+ // create pid dirs.
+ TestProcfsBasedProcessTree.setupPidDirs(procfsRootDir, pids);
+
+ // create process infos.
+ TestProcfsBasedProcessTree.ProcessStatInfo[] procs =
+ new TestProcfsBasedProcessTree.ProcessStatInfo[7];
+
+ // assume pids 100, 500 are in 1 tree
+ // 200,300,400 are in another
+ // 600,700 are in a third
+ procs[0] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+ new String[] { "100", "proc1", "1", "100", "100", "100000" });
+ procs[1] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+ new String[] { "200", "proc2", "1", "200", "200", "200000" });
+ procs[2] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+ new String[] { "300", "proc3", "200", "200", "200", "300000" });
+ procs[3] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+ new String[] { "400", "proc4", "200", "200", "200", "400000" });
+ procs[4] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+ new String[] { "500", "proc5", "100", "100", "100", "1500000" });
+ procs[5] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+ new String[] { "600", "proc6", "1", "600", "600", "100000" });
+ procs[6] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+ new String[] { "700", "proc7", "600", "600", "600", "100000" });
+ // write stat files.
+ TestProcfsBasedProcessTree.writeStatFiles(procfsRootDir, pids, procs);
+
+ // vmem limit
+ long limit = 700000;
+
+ ContainersMonitorImpl test = new ContainersMonitorImpl(null, null, null);
+
+ // create process trees
+ // tree rooted at 100 is over limit immediately, as it is
+ // twice over the mem limit.
+ ProcfsBasedProcessTree pTree = new ProcfsBasedProcessTree(
+ "100", true,
+ procfsRootDir.getAbsolutePath());
+ pTree.getProcessTree();
+ assertTrue("tree rooted at 100 should be over limit " +
+ "after first iteration.",
+ test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+
+ // the tree rooted at 200 is initially below limit.
+ pTree = new ProcfsBasedProcessTree("200", true,
+ procfsRootDir.getAbsolutePath());
+ pTree.getProcessTree();
+ assertFalse("tree rooted at 200 shouldn't be over limit " +
+ "after one iteration.",
+ test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+ // second iteration - now the tree has been over limit twice,
+ // hence it should be declared over limit.
+ pTree.getProcessTree();
+ assertTrue(
+ "tree rooted at 200 should be over limit after 2 iterations",
+ test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+
+ // the tree rooted at 600 is never over limit.
+ pTree = new ProcfsBasedProcessTree("600", true,
+ procfsRootDir.getAbsolutePath());
+ pTree.getProcessTree();
+ assertFalse("tree rooted at 600 should never be over limit.",
+ test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+
+ // another iteration does not make any difference.
+ pTree.getProcessTree();
+ assertFalse("tree rooted at 600 should never be over limit.",
+ test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+ } finally {
+ FileUtil.fullyDelete(procfsRootDir);
+ }
+ }
+
+ @Test
+ public void testContainerKillOnMemoryOverflow() throws IOException,
+ InterruptedException {
+
+ if (!ProcfsBasedProcessTree.isAvailable()) {
+ return;
+ }
+
+ containerManager.start();
+
+ File scriptFile = new File(tmpDir, "scriptFile.sh");
+ PrintWriter fileWriter = new PrintWriter(scriptFile);
+ File processStartFile =
+ new File(tmpDir, "start_file.txt").getAbsoluteFile();
+ fileWriter.write("\numask 0"); // So that start file is readable by the
+ // test.
+ fileWriter.write("\necho Hello World! > " + processStartFile);
+ fileWriter.write("\necho $$ >> " + processStartFile);
+ fileWriter.write("\nsleep 15");
+ fileWriter.close();
+
+ ContainerLaunchContext containerLaunchContext =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ // ////// Construct the Container-id
+ ApplicationId appId =
+ recordFactory.newRecordInstance(ApplicationId.class);
+ ContainerId cId = recordFactory.newRecordInstance(ContainerId.class);
+ cId.setAppId(appId);
+ cId.setId(0);
+ containerLaunchContext.setContainerId(cId);
+
+ containerLaunchContext.setUser(user);
+
+ URL resource_alpha =
+ ConverterUtils.getYarnUrlFromPath(localFS
+ .makeQualified(new Path(scriptFile.getAbsolutePath())));
+ LocalResource rsrc_alpha =
+ recordFactory.newRecordInstance(LocalResource.class);
+ rsrc_alpha.setResource(resource_alpha);
+ rsrc_alpha.setSize(-1);
+ rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
+ rsrc_alpha.setType(LocalResourceType.FILE);
+ rsrc_alpha.setTimestamp(scriptFile.lastModified());
+ String destinationFile = "dest_file";
+ containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha);
+ containerLaunchContext.addCommand("/bin/bash");
+ containerLaunchContext.addCommand(scriptFile.getAbsolutePath());
+ containerLaunchContext.setResource(recordFactory
+ .newRecordInstance(Resource.class));
+ containerLaunchContext.getResource().setMemory(8 * 1024 * 1024);
+ StartContainerRequest startRequest =
+ recordFactory.newRecordInstance(StartContainerRequest.class);
+ startRequest.setContainerLaunchContext(containerLaunchContext);
+ containerManager.startContainer(startRequest);
+
+ int timeoutSecs = 0;
+ while (!processStartFile.exists() && timeoutSecs++ < 20) {
+ Thread.sleep(1000);
+ LOG.info("Waiting for process start-file to be created");
+ }
+ Assert.assertTrue("ProcessStartFile doesn't exist!",
+ processStartFile.exists());
+
+ // Now verify the contents of the file
+ BufferedReader reader =
+ new BufferedReader(new FileReader(processStartFile));
+ Assert.assertEquals("Hello World!", reader.readLine());
+ // Get the pid of the process
+ String pid = reader.readLine().trim();
+ // No more lines
+    Assert.assertEquals(null, reader.readLine());
+    reader.close();
+
+ BaseContainerManagerTest.waitForContainerState(containerManager, cId,
+ ContainerState.COMPLETE, 60);
+
+ GetContainerStatusRequest gcsRequest =
+ recordFactory.newRecordInstance(GetContainerStatusRequest.class);
+ gcsRequest.setContainerId(cId);
+ ContainerStatus containerStatus =
+ containerManager.getContainerStatus(gcsRequest).getStatus();
+ Assert.assertEquals(String.valueOf(ExitCode.KILLED.getExitCode()),
+ containerStatus.getExitStatus());
+ String expectedMsgPattern =
+ "Container \\[pid=" + pid + ",containerID=" + cId
+ + "\\] is running beyond memory-limits. Current usage : "
+ + "[0-9]*bytes. Limit : [0-9]*"
+ + "bytes. Killing container. \nDump of the process-tree for "
+ + cId + " : \n";
+ Pattern pat = Pattern.compile(expectedMsgPattern);
+ Assert.assertEquals("Expected message patterns is: " + expectedMsgPattern
+ + "\n\nObserved message is: " + containerStatus.getDiagnostics(),
+ true, pat.matcher(containerStatus.getDiagnostics()).find());
+
+ // Assert that the process is not alive anymore
+ Assert.assertFalse("Process is still alive!",
+ exec.signalContainer(user,
+ pid, Signal.NULL));
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java
new file mode 100644
index 0000000..8384220
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.metrics;
+
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import static org.apache.hadoop.test.MetricsAsserts.*;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Records;
+
+import org.junit.Test;
+
+public class TestNodeManagerMetrics {
+  static final int GiB = 1024; // 1 GiB expressed in MiB
+
+ @Test public void testNames() {
+ NodeManagerMetrics metrics = NodeManagerMetrics.create();
+ Resource total = Records.newRecord(Resource.class);
+ total.setMemory(8*GiB);
+ Resource resource = Records.newRecord(Resource.class);
+ resource.setMemory(1*GiB);
+
+ metrics.addResource(total);
+
+ for (int i = 5; i-- > 0;) {
+ metrics.launchedContainer();
+ metrics.allocateContainer(resource);
+ }
+
+ metrics.initingContainer();
+ metrics.endInitingContainer();
+ metrics.runningContainer();
+ metrics.endRunningContainer();
+ metrics.completedContainer();
+ metrics.releaseContainer(resource);
+
+ metrics.failedContainer();
+ metrics.releaseContainer(resource);
+
+ metrics.killedContainer();
+ metrics.releaseContainer(resource);
+
+ metrics.initingContainer();
+ metrics.runningContainer();
+
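+    // Expected: 5 launched; 1 each completed/failed/killed; 1 initing and
+    // 1 running still active; 2 containers (2 GB) still allocated;
+    // 8 GB total minus 2 GB leaves 6 GB available.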
+ checkMetrics(5, 1, 1, 1, 1, 1, 2, 2, 6);
+ }
+
+ private void checkMetrics(int launched, int completed, int failed, int killed,
+ int initing, int running, int allocatedGB,
+ int allocatedContainers, int availableGB) {
+ MetricsRecordBuilder rb = getMetrics("NodeManagerMetrics");
+ assertCounter("ContainersLaunched", launched, rb);
+ assertCounter("ContainersCompleted", completed, rb);
+ assertCounter("ContainersFailed", failed, rb);
+ assertCounter("ContainersKilled", killed, rb);
+ assertGauge("ContainersIniting", initing, rb);
+ assertGauge("ContainersRunning", running, rb);
+ assertGauge("AllocatedGB", allocatedGB, rb);
+ assertGauge("AllocatedContainers", allocatedContainers, rb);
+ assertGauge("AvailableGB", availableGB, rb);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
new file mode 100644
index 0000000..d49a204
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
@@ -0,0 +1,135 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.Writer;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
+import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.junit.Before;
+import org.junit.Test;
+import static org.mockito.Mockito.*;
+
+public class TestNMWebServer {
+
+ private static final File testRootDir = new File("target",
+ TestNMWebServer.class.getSimpleName());
+
+ @Before
+ public void setup() {
+ testRootDir.mkdirs();
+ }
+
+ @Test
+ public void testNMWebApp() throws InterruptedException, IOException {
+ Context nmContext = new NodeManager.NMContext();
+ ResourceView resourceView = new ResourceView() {
+ @Override
+ public long getVmemAllocatedForContainers() {
+ return 0;
+ }
+ @Override
+ public long getPmemAllocatedForContainers() {
+ return 0;
+ }
+ };
+ WebServer server = new WebServer(nmContext, resourceView);
+ Configuration conf = new Configuration();
+ conf.set(NMConfig.NM_LOCAL_DIR, testRootDir.getAbsolutePath());
+ server.init(conf);
+ server.start();
+
+ // Add an application and the corresponding containers
+ RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(conf);
+ Dispatcher dispatcher = new AsyncDispatcher();
+ String user = "nobody";
+ long clusterTimeStamp = 1234;
+ ApplicationId appId =
+ BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
+ Application app = mock(Application.class);
+ when(app.getUser()).thenReturn(user);
+ when(app.getAppId()).thenReturn(appId);
+ nmContext.getApplications().put(appId, app);
+ ContainerId container1 =
+ BuilderUtils.newContainerId(recordFactory, appId, 0);
+ ContainerId container2 =
+ BuilderUtils.newContainerId(recordFactory, appId, 1);
+ NodeManagerMetrics metrics = mock(NodeManagerMetrics.class);
+ for (ContainerId containerId : new ContainerId[] { container1,
+ container2}) {
+ // TODO: Use builder utils
+ ContainerLaunchContext launchContext =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ launchContext.setContainerId(containerId);
+ launchContext.setUser(user);
+ Container container =
+ new ContainerImpl(dispatcher, launchContext, null, metrics) {
+ @Override
+ public ContainerState getContainerState() {
+ return ContainerState.RUNNING;
+            }
+ };
+ nmContext.getContainers().put(containerId, container);
+ //TODO: Gross hack. Fix in code.
+ nmContext.getApplications().get(containerId.getAppId()).getContainers()
+ .put(containerId, container);
+ writeContainerLogs(conf, nmContext, containerId);
+
+ }
+ // TODO: Pull logs and test contents.
+ }
+
+ private void writeContainerLogs(Configuration conf, Context nmContext,
+ ContainerId containerId)
+ throws IOException {
+ // ContainerLogDir should be created
+ File containerLogDir =
+ ContainerLogsPage.ContainersLogsBlock.getContainerLogDirs(conf,
+ containerId).get(0);
+ containerLogDir.mkdirs();
+ for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
+ Writer writer = new FileWriter(new File(containerLogDir, fileType));
+ writer.write(ConverterUtils.toString(containerId) + "\n Hello "
+ + fileType + "!");
+ writer.close();
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
new file mode 100644
index 0000000..66b4800
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-yarn-server</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${yarn.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ <name>hadoop-yarn-server-resourcemanager</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-common</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-protobuf-generated-sources-directory</id>
+ <phase>initialize</phase>
+ <configuration>
+ <target>
+ <mkdir dir="target/generated-sources/proto" />
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
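+      <!-- protoc compiles the RM admin .proto files into
+           target/generated-sources/proto; the build-helper plugin below
+           registers that directory as a source root. -->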
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>generate-sources</id>
+ <phase>generate-sources</phase>
+ <configuration>
+ <executable>protoc</executable>
+ <arguments>
+ <argument>-Isrc/main/proto/</argument>
+ <argument>--java_out=target/generated-sources/proto</argument>
+ <argument>src/main/proto/yarn_server_resourcemanager_service_protos.proto</argument>
+ <argument>src/main/proto/RMAdminProtocol.proto</argument>
+ </arguments>
+ </configuration>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>target/generated-sources/proto</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/security/admin/AdminSecurityInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/security/admin/AdminSecurityInfo.java
new file mode 100644
index 0000000..ecc80a8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/security/admin/AdminSecurityInfo.java
@@ -0,0 +1,43 @@
+package org.apache.hadoop.yarn.security.admin;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.proto.RMAdminProtocol;
+
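+/**
+ * Security metadata for the RM admin protocol: supplies the configuration
+ * key naming the Kerberos server principal, and provides no token info.
+ */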
+public class AdminSecurityInfo extends SecurityInfo {
+
+ @Override
+ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+ if (!protocol.equals(RMAdminProtocol.RMAdminProtocolService.BlockingInterface.class)) {
+ return null;
+ }
+ return new KerberosInfo() {
+
+ @Override
+ public Class<? extends Annotation> annotationType() {
+ return null;
+ }
+
+ @Override
+ public String serverPrincipal() {
+ return YarnConfiguration.RM_SERVER_PRINCIPAL_KEY;
+ }
+
+ @Override
+ public String clientPrincipal() {
+ return null;
+ }
+ };
+ }
+
+ @Override
+ public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+ return null;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
new file mode 100644
index 0000000..061f4ee
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -0,0 +1,202 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.SchedulerSecurityInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+public class AdminService extends AbstractService implements RMAdminProtocol {
+
+ private static final Log LOG = LogFactory.getLog(AdminService.class);
+
+ private final Configuration conf;
+ private final ResourceScheduler scheduler;
+ private final RMContext rmContext;
+ private final NodesListManager nodesListManager;
+
+ private Server server;
+ private InetSocketAddress masterServiceAddress;
+ private AccessControlList adminAcl;
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ public AdminService(Configuration conf, ResourceScheduler scheduler,
+ RMContext rmContext, NodesListManager nodesListManager) {
+ super(AdminService.class.getName());
+ this.conf = conf;
+ this.scheduler = scheduler;
+ this.rmContext = rmContext;
+ this.nodesListManager = nodesListManager;
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ super.init(conf);
+ String bindAddress =
+ conf.get(RMConfig.ADMIN_ADDRESS,
+ RMConfig.DEFAULT_ADMIN_BIND_ADDRESS);
+ masterServiceAddress = NetUtils.createSocketAddr(bindAddress);
+ adminAcl =
+ new AccessControlList(
+ conf.get(RMConfig.RM_ADMIN_ACL, RMConfig.DEFAULT_RM_ADMIN_ACL));
+ }
+
+ public void start() {
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ Configuration serverConf = new Configuration(getConfig());
+ serverConf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ SchedulerSecurityInfo.class, SecurityInfo.class);
+ this.server =
+ rpc.getServer(RMAdminProtocol.class, this, masterServiceAddress,
+ serverConf, null,
+ serverConf.getInt(RMConfig.RM_ADMIN_THREADS,
+ RMConfig.DEFAULT_RM_ADMIN_THREADS));
+ this.server.start();
+ super.start();
+ }
+
+ @Override
+ public void stop() {
+ if (this.server != null) {
+ this.server.close();
+ }
+ super.stop();
+ }
+
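+  // Rejects the call unless the current caller is listed in the admin ACL.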
+ private void checkAcls(String method) throws YarnRemoteException {
+ try {
+ UserGroupInformation user = UserGroupInformation.getCurrentUser();
+ if (!adminAcl.isUserAllowed(user)) {
+ LOG.warn("User " + user.getShortUserName() + " doesn't have permission" +
+ " to call '" + method + "'");
+
+ throw RPCUtil.getRemoteException(
+ new AccessControlException("User " + user.getShortUserName() +
+ " doesn't have permission" +
+ " to call '" + method + "'")
+ );
+ }
+
+ LOG.info("RM Admin: " + method + " invoked by user " +
+ user.getShortUserName());
+
+ } catch (IOException ioe) {
+ LOG.warn("Couldn't get current user", ioe);
+ throw RPCUtil.getRemoteException(ioe);
+ }
+ }
+
+ @Override
+ public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
+ throws YarnRemoteException {
+ checkAcls("refreshQueues");
+
+ try {
+      // the ContainerTokenSecretManager can't be 'refreshed', hence null
+      scheduler.reinitialize(conf, null, null);
+ return recordFactory.newRecordInstance(RefreshQueuesResponse.class);
+ } catch (IOException ioe) {
+ LOG.info("Exception refreshing queues ", ioe);
+ throw RPCUtil.getRemoteException(ioe);
+ }
+ }
+
+ @Override
+ public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
+ throws YarnRemoteException {
+ checkAcls("refreshNodes");
+ try {
+ this.nodesListManager.refreshNodes();
+ return recordFactory.newRecordInstance(RefreshNodesResponse.class);
+ } catch (IOException ioe) {
+ LOG.info("Exception refreshing nodes ", ioe);
+ throw RPCUtil.getRemoteException(ioe);
+ }
+ }
+
+ @Override
+ public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
+ RefreshSuperUserGroupsConfigurationRequest request)
+ throws YarnRemoteException {
+ checkAcls("refreshSuperUserGroupsConfiguration");
+
+ ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration());
+
+ return recordFactory.newRecordInstance(
+ RefreshSuperUserGroupsConfigurationResponse.class);
+ }
+
+ @Override
+ public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
+ RefreshUserToGroupsMappingsRequest request) throws YarnRemoteException {
+ checkAcls("refreshUserToGroupsMappings");
+
+ Groups.getUserToGroupsMappingService().refresh();
+
+ return recordFactory.newRecordInstance(
+ RefreshUserToGroupsMappingsResponse.class);
+ }
+
+ @Override
+ public RefreshAdminAclsResponse refreshAdminAcls(
+ RefreshAdminAclsRequest request) throws YarnRemoteException {
+ checkAcls("refreshAdminAcls");
+
+ Configuration conf = new Configuration();
+ adminAcl =
+ new AccessControlList(
+ conf.get(RMConfig.RM_ADMIN_ACL, RMConfig.DEFAULT_RM_ADMIN_ACL));
+
+ return recordFactory.newRecordInstance(RefreshAdminAclsResponse.class);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationACL.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationACL.java
new file mode 100644
index 0000000..3721dde
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationACL.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.classification.*;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Application related ACLs
+ */
+@InterfaceAudience.Private
+public enum ApplicationACL {
+
+ /**
+   * ACL for viewing an application. Dictates who can view some or all of
+   * the application-related details.
+ */
+ VIEW_APP(YarnConfiguration.APPLICATION_ACL_VIEW_APP),
+
+ /**
+   * ACL for modifying an application. Dictates who can modify the
+   * application, e.g., by killing it.
+ */
+ MODIFY_APP(YarnConfiguration.APPLICATION_ACL_MODIFY_APP);
+
+ String aclName;
+
+ ApplicationACL(String name) {
+ this.aclName = name;
+ }
+
+ /**
+ * Get the name of the ACL. Here it is same as the name of the configuration
+ * property for specifying the ACL for the application.
+ *
+ * @return aclName
+ */
+ public String getAclName() {
+ return aclName;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationACLsManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationACLsManager.java
new file mode 100644
index 0000000..f72675f
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationACLsManager.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+
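+/**
+ * Maintains and checks the application-level ACLs configured on the
+ * ResourceManager. A minimal usage sketch (illustrative only; the ACL value
+ * lists comma-separated users, then a space, then comma-separated groups):
+ * <pre>{@code
+ * Configuration conf = new Configuration();
+ * conf.setBoolean(RMConfig.RM_ACLS_ENABLED, true);
+ * conf.set(YarnConfiguration.APPLICATION_ACL_VIEW_APP, "alice,bob ops");
+ * ApplicationACLsManager manager = new ApplicationACLsManager(conf);
+ * Map<ApplicationACL, AccessControlList> acls =
+ *     manager.constructApplicationACLs(conf);
+ * UserGroupInformation caller = UserGroupInformation.getCurrentUser();
+ * boolean canView = manager.checkAccess(caller, ApplicationACL.VIEW_APP,
+ *     "appOwner", acls.get(ApplicationACL.VIEW_APP));
+ * }</pre>
+ */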
+@InterfaceAudience.Private
+public class ApplicationACLsManager {
+
+  private final Configuration conf;
+
+ public ApplicationACLsManager(Configuration conf) {
+ this.conf = conf;
+ }
+
+ public boolean areACLsEnabled() {
+ return conf.getBoolean(RMConfig.RM_ACLS_ENABLED, false);
+ }
+
+  /**
+   * Construct the ApplicationACLs from the configuration so that they can be
+   * kept in memory. If authorization is disabled on the RM, nothing is
+   * constructed and an empty map is returned.
+   *
+   * @return ApplicationACL to AccessControlList map.
+   */
+ public Map<ApplicationACL, AccessControlList> constructApplicationACLs(
+ Configuration conf) {
+
+ Map<ApplicationACL, AccessControlList> acls =
+ new HashMap<ApplicationACL, AccessControlList>();
+
+ // Don't construct anything if authorization is disabled.
+ if (!areACLsEnabled()) {
+ return acls;
+ }
+
+    for (ApplicationACL applicationACL : ApplicationACL.values()) {
+      String aclConfigName = applicationACL.getAclName();
+      String aclConfigured = conf.get(aclConfigName);
+      if (aclConfigured == null) {
+        // If this ACL is not configured at all, grant access to no one: only
+        // the application owner and the superuser/supergroup can then act on
+        // the application. A single space denotes an empty AccessControlList.
+        aclConfigured = " ";
+      }
+      acls.put(applicationACL, new AccessControlList(aclConfigured));
+    }
+ return acls;
+ }
+
+  /**
+   * If authorization is enabled, checks whether the user (in the callerUGI)
+   * is authorized to perform the operation specified by 'applicationOperation'
+   * on the application, i.e. whether the user is the application owner or is
+   * listed in the application ACL for that operation.
+   * <ul>
+   * <li>The owner of the application can perform any operation on it</li>
+   * <li>For all other users/groups, the application ACLs are checked</li>
+   * </ul>
+   * @param callerUGI the user attempting the operation
+   * @param applicationOperation the ACL being checked
+   * @param applicationOwner the owner of the application
+   * @param acl the configured AccessControlList for this operation
+   * @return true if access is allowed, false otherwise
+   */
+ public boolean checkAccess(UserGroupInformation callerUGI,
+ ApplicationACL applicationOperation, String applicationOwner,
+ AccessControlList acl) {
+
+    if (!areACLsEnabled()) {
+      return true;
+    }
+
+    // Allow the application owner any operation on the application;
+    // everyone else must pass the ACL check.
+    String user = callerUGI.getShortUserName();
+    return user.equals(applicationOwner) || acl.isUserAllowed(callerUGI);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
new file mode 100644
index 0000000..fe0c8b2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -0,0 +1,257 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.security.SchedulerSecurityInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStatusupdateEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.service.AbstractService;
+
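+/**
+ * The RPC endpoint through which ApplicationMasters talk to the
+ * ResourceManager: registration, periodic allocate heartbeats, and
+ * unregistration. Allocate requests carry a responseId echoing the last
+ * response the AM saw: a request whose responseId + 1 equals the cached
+ * response's id is treated as a retransmission and answered from the cache,
+ * while an older id indicates lost state and triggers a reboot command to
+ * the AM.
+ */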
+@Private
+public class ApplicationMasterService extends AbstractService implements
+ AMRMProtocol {
+ private static final Log LOG = LogFactory.getLog(ApplicationMasterService.class);
+ private final AMLivelinessMonitor amLivelinessMonitor;
+ private YarnScheduler rScheduler;
+ private ApplicationTokenSecretManager appTokenManager;
+ private InetSocketAddress masterServiceAddress;
+ private Server server;
+ private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ private final ConcurrentMap<ApplicationAttemptId, AMResponse> responseMap =
+ new ConcurrentHashMap<ApplicationAttemptId, AMResponse>();
+ private final AMResponse reboot = recordFactory.newRecordInstance(AMResponse.class);
+ private final RMContext rmContext;
+
+ public ApplicationMasterService(RMContext rmContext,
+ ApplicationTokenSecretManager appTokenManager, YarnScheduler scheduler) {
+ super(ApplicationMasterService.class.getName());
+ this.amLivelinessMonitor = rmContext.getAMLivelinessMonitor();
+ this.appTokenManager = appTokenManager;
+ this.rScheduler = scheduler;
+ this.reboot.setReboot(true);
+// this.reboot.containers = new ArrayList<Container>();
+ this.rmContext = rmContext;
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ String bindAddress =
+ conf.get(YarnConfiguration.SCHEDULER_ADDRESS,
+ YarnConfiguration.DEFAULT_SCHEDULER_BIND_ADDRESS);
+ masterServiceAddress = NetUtils.createSocketAddr(bindAddress);
+ super.init(conf);
+ }
+
+ @Override
+ public void start() {
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ Configuration serverConf = new Configuration(getConfig());
+ serverConf.setClass(YarnConfiguration.YARN_SECURITY_INFO,
+ SchedulerSecurityInfo.class, SecurityInfo.class);
+ this.server =
+ rpc.getServer(AMRMProtocol.class, this, masterServiceAddress,
+ serverConf, this.appTokenManager,
+ serverConf.getInt(RMConfig.RM_AM_THREADS,
+ RMConfig.DEFAULT_RM_AM_THREADS));
+ this.server.start();
+ super.start();
+ }
+
+ @Override
+ public RegisterApplicationMasterResponse registerApplicationMaster(
+ RegisterApplicationMasterRequest request) throws YarnRemoteException {
+
+ ApplicationAttemptId applicationAttemptId = request
+ .getApplicationAttemptId();
+ AMResponse lastResponse = responseMap.get(applicationAttemptId);
+ if (lastResponse == null) {
+ String message = "Application doesn't exist in cache "
+ + applicationAttemptId;
+ LOG.error(message);
+ throw RPCUtil.getRemoteException(message);
+ }
+
+ // Allow only one thread in AM to do registerApp at a time.
+ synchronized (lastResponse) {
+
+ LOG.info("AM registration " + applicationAttemptId);
+ this.amLivelinessMonitor.receivedPing(applicationAttemptId);
+
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppAttemptRegistrationEvent(applicationAttemptId, request
+ .getHost(), request.getRpcPort(), request.getTrackingUrl()));
+
+ // Pick up min/max resource from scheduler...
+ RegisterApplicationMasterResponse response = recordFactory
+ .newRecordInstance(RegisterApplicationMasterResponse.class);
+ response.setMinimumResourceCapability(rScheduler
+ .getMinimumResourceCapability());
+ response.setMaximumResourceCapability(rScheduler
+ .getMaximumResourceCapability());
+ return response;
+ }
+ }
+
+ @Override
+ public FinishApplicationMasterResponse finishApplicationMaster(
+ FinishApplicationMasterRequest request) throws YarnRemoteException {
+
+ ApplicationAttemptId applicationAttemptId = request
+ .getApplicationAttemptId();
+ AMResponse lastResponse = responseMap.get(applicationAttemptId);
+ if (lastResponse == null) {
+ String message = "Application doesn't exist in cache "
+ + applicationAttemptId;
+ LOG.error(message);
+ throw RPCUtil.getRemoteException(message);
+ }
+
+ // Allow only one thread in AM to do finishApp at a time.
+ synchronized (lastResponse) {
+
+ this.amLivelinessMonitor.receivedPing(applicationAttemptId);
+
+ rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppAttemptUnregistrationEvent(applicationAttemptId, request
+ .getTrackingUrl(), request.getFinalState(), request
+ .getDiagnostics()));
+
+ FinishApplicationMasterResponse response = recordFactory
+ .newRecordInstance(FinishApplicationMasterResponse.class);
+ return response;
+ }
+ }
+
+ @Override
+ public AllocateResponse allocate(AllocateRequest request)
+ throws YarnRemoteException {
+
+ ApplicationAttemptId appAttemptId = request.getApplicationAttemptId();
+
+ this.amLivelinessMonitor.receivedPing(appAttemptId);
+
+    /* check if it is in the cache */
+ AllocateResponse allocateResponse = recordFactory
+ .newRecordInstance(AllocateResponse.class);
+ AMResponse lastResponse = responseMap.get(appAttemptId);
+ if (lastResponse == null) {
+ LOG.error("AppAttemptId doesnt exist in cache " + appAttemptId);
+ allocateResponse.setAMResponse(reboot);
+ return allocateResponse;
+ }
+ if ((request.getResponseId() + 1) == lastResponse.getResponseId()) {
+ /* old heartbeat */
+ allocateResponse.setAMResponse(lastResponse);
+ return allocateResponse;
+ } else if (request.getResponseId() + 1 < lastResponse.getResponseId()) {
+ LOG.error("Invalid responseid from appAttemptId " + appAttemptId);
+ // Oh damn! Sending reboot isn't enough. RM state is corrupted. TODO:
+ allocateResponse.setAMResponse(reboot);
+ return allocateResponse;
+ }
+
+ // Allow only one thread in AM to do heartbeat at a time.
+    synchronized (lastResponse) { // BUG TODO: locking order is inconsistent.
+
+ // Send the status update to the appAttempt.
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppAttemptStatusupdateEvent(appAttemptId, request
+ .getProgress()));
+
+ List<ResourceRequest> ask = request.getAskList();
+ List<ContainerId> release = request.getReleaseList();
+
+ // Send new requests to appAttempt.
+ Allocation allocation =
+ this.rScheduler.allocate(appAttemptId, ask, release);
+
+ RMApp app = this.rmContext.getRMApps().get(appAttemptId.getApplicationId());
+ RMAppAttempt appAttempt = app.getRMAppAttempt(appAttemptId);
+
+ AMResponse response = recordFactory.newRecordInstance(AMResponse.class);
+ response.addAllNewContainers(allocation.getContainers());
+ response.addAllFinishedContainers(appAttempt
+ .pullJustFinishedContainers());
+ response.setResponseId(lastResponse.getResponseId() + 1);
+ response.setAvailableResources(allocation.getResourceLimit());
+ responseMap.put(appAttemptId, response);
+ allocateResponse.setAMResponse(response);
+ return allocateResponse;
+ }
+ }
+
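+  /**
+   * Called when a new application attempt is created: seeds the response
+   * cache with an empty response (responseId 0) so that the attempt can
+   * subsequently register and heartbeat.
+   */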
+ public void registerAppAttempt(ApplicationAttemptId attemptId) {
+ AMResponse response = recordFactory.newRecordInstance(AMResponse.class);
+ response.setResponseId(0);
+ responseMap.put(attemptId, response);
+ }
+
+ public void unregisterAttempt(ApplicationAttemptId attemptId) {
+ AMResponse lastResponse = responseMap.get(attemptId);
+ if (lastResponse != null) {
+ synchronized (lastResponse) {
+ responseMap.remove(attemptId);
+ }
+ }
+ }
+
+ @Override
+ public void stop() {
+ if (this.server != null) {
+ this.server.close();
+ }
+ super.stop();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
new file mode 100644
index 0000000..df7a9e6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -0,0 +1,385 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.AccessControlException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
+import org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo;
+import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+
+/**
+ * The client interface to the ResourceManager. This module handles all the
+ * RPC interfaces to the ResourceManager from clients.
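+ * Operations include application-id allocation, application submission and
+ * termination, application/cluster/queue report retrieval, and node listing.
+ * A minimal client-side sketch (illustrative; the recordFactory and the
+ * clientRMProtocol proxy are assumed to be available to the caller):
+ * <pre>{@code
+ * GetNewApplicationIdRequest request =
+ *     recordFactory.newRecordInstance(GetNewApplicationIdRequest.class);
+ * ApplicationId appId =
+ *     clientRMProtocol.getNewApplicationId(request).getApplicationId();
+ * }</pre>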
+ */
+public class ClientRMService extends AbstractService implements
+ ClientRMProtocol {
+  private static final ArrayList<ApplicationReport> EMPTY_APPS_REPORT =
+      new ArrayList<ApplicationReport>();
+
+ private static final Log LOG = LogFactory.getLog(ClientRMService.class);
+
+  private final AtomicInteger applicationCounter = new AtomicInteger(0);
+  private final YarnScheduler scheduler;
+  private final RMContext rmContext;
+ private final ApplicationMasterService masterService;
+ private final ClientToAMSecretManager clientToAMSecretManager;
+ private final AMLivelinessMonitor amLivelinessMonitor;
+
+ private String clientServiceBindAddress;
+ private Server server;
+ private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ InetSocketAddress clientBindAddress;
+
+ private ApplicationACLsManager aclsManager;
+ private Map<ApplicationACL, AccessControlList> applicationACLs;
+
+ public ClientRMService(RMContext rmContext,
+ ClientToAMSecretManager clientToAMSecretManager,
+ YarnScheduler scheduler, ApplicationMasterService masterService) {
+ super(ClientRMService.class.getName());
+ this.scheduler = scheduler;
+ this.rmContext = rmContext;
+ this.masterService = masterService;
+ this.amLivelinessMonitor = rmContext.getAMLivelinessMonitor();
+ this.clientToAMSecretManager = clientToAMSecretManager;
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ clientServiceBindAddress =
+ conf.get(YarnConfiguration.APPSMANAGER_ADDRESS,
+ YarnConfiguration.DEFAULT_APPSMANAGER_BIND_ADDRESS);
+ clientBindAddress =
+ NetUtils.createSocketAddr(clientServiceBindAddress);
+
+ this.aclsManager = new ApplicationACLsManager(conf);
+ this.applicationACLs = aclsManager.constructApplicationACLs(conf);
+
+ super.init(conf);
+ }
+
+ @Override
+ public void start() {
+ // All the clients to appsManager are supposed to be authenticated via
+ // Kerberos if security is enabled, so no secretManager.
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ Configuration clientServerConf = new Configuration(getConfig());
+ clientServerConf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ ClientRMSecurityInfo.class, SecurityInfo.class);
+ this.server =
+ rpc.getServer(ClientRMProtocol.class, this,
+ clientBindAddress,
+ clientServerConf, null,
+ clientServerConf.getInt(RMConfig.RM_CLIENT_THREADS,
+ RMConfig.DEFAULT_RM_CLIENT_THREADS));
+ this.server.start();
+ super.start();
+ }
+
+  /**
+   * Check if the calling user has access to the application information.
+   * @param callerUGI the user making the call
+   * @param owner the owner of the application
+   * @param appACL the application ACL to check against
+   * @return true if access is allowed, false otherwise
+   */
+  private boolean checkAccess(UserGroupInformation callerUGI, String owner,
+      ApplicationACL appACL) {
+ if (!UserGroupInformation.isSecurityEnabled()) {
+ return true;
+ }
+ AccessControlList applicationACL = applicationACLs.get(appACL);
+ return aclsManager.checkAccess(callerUGI, appACL, owner, applicationACL);
+ }
+
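+  /**
+   * Hands out a new cluster-unique ApplicationId, built from the RM start
+   * timestamp and a monotonically increasing counter.
+   */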
+ public ApplicationId getNewApplicationId() {
+ ApplicationId applicationId = org.apache.hadoop.yarn.util.BuilderUtils
+ .newApplicationId(recordFactory, ResourceManager.clusterTimeStamp,
+ applicationCounter.incrementAndGet());
+ LOG.info("Allocated new applicationId: " + applicationId.getId());
+ return applicationId;
+ }
+
+ @Override
+ public GetNewApplicationIdResponse getNewApplicationId(
+ GetNewApplicationIdRequest request) throws YarnRemoteException {
+ GetNewApplicationIdResponse response = recordFactory
+ .newRecordInstance(GetNewApplicationIdResponse.class);
+ response.setApplicationId(getNewApplicationId());
+ return response;
+ }
+
+ @Override
+ public GetApplicationReportResponse getApplicationReport(
+ GetApplicationReportRequest request) throws YarnRemoteException {
+ ApplicationId applicationId = request.getApplicationId();
+ RMApp application = rmContext.getRMApps().get(applicationId);
+ ApplicationReport report = (application == null) ? null : application
+ .createAndGetApplicationReport();
+
+ GetApplicationReportResponse response = recordFactory
+ .newRecordInstance(GetApplicationReportResponse.class);
+ response.setApplicationReport(report);
+ return response;
+ }
+
+ @Override
+ public SubmitApplicationResponse submitApplication(
+ SubmitApplicationRequest request) throws YarnRemoteException {
+ ApplicationSubmissionContext submissionContext = request
+ .getApplicationSubmissionContext();
+ try {
+
+ ApplicationId applicationId = submissionContext.getApplicationId();
+ String clientTokenStr = null;
+ String user = UserGroupInformation.getCurrentUser().getShortUserName();
+ if (UserGroupInformation.isSecurityEnabled()) {
+ Token<ApplicationTokenIdentifier> clientToken = new Token<ApplicationTokenIdentifier>(
+ new ApplicationTokenIdentifier(applicationId),
+ this.clientToAMSecretManager);
+ clientTokenStr = clientToken.encodeToUrlString();
+ LOG.debug("Sending client token as " + clientTokenStr);
+ }
+
+ submissionContext.setQueue(submissionContext.getQueue() == null
+ ? "default" : submissionContext.getQueue());
+ submissionContext.setApplicationName(submissionContext
+ .getApplicationName() == null ? "N/A" : submissionContext
+ .getApplicationName());
+
+ ApplicationStore appStore = rmContext.getApplicationsStore()
+ .createApplicationStore(submissionContext.getApplicationId(),
+ submissionContext);
+ RMApp application = new RMAppImpl(applicationId, rmContext,
+ getConfig(), submissionContext.getApplicationName(), user,
+ submissionContext.getQueue(), submissionContext, clientTokenStr,
+ appStore, this.amLivelinessMonitor, this.scheduler,
+ this.masterService);
+ if (rmContext.getRMApps().putIfAbsent(applicationId, application) != null) {
+ throw new IOException("Application with id " + applicationId
+ + " is already present! Cannot add a duplicate!");
+ }
+
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppEvent(applicationId, RMAppEventType.START));
+
+ LOG.info("Application with id " + applicationId.getId()
+ + " submitted by user " + user + " with " + submissionContext);
+ } catch (IOException ie) {
+ LOG.info("Exception in submitting application", ie);
+ throw RPCUtil.getRemoteException(ie);
+ }
+
+ SubmitApplicationResponse response = recordFactory
+ .newRecordInstance(SubmitApplicationResponse.class);
+ return response;
+ }
+
+ @Override
+ public FinishApplicationResponse finishApplication(
+ FinishApplicationRequest request) throws YarnRemoteException {
+
+ ApplicationId applicationId = request.getApplicationId();
+
+ UserGroupInformation callerUGI;
+ try {
+ callerUGI = UserGroupInformation.getCurrentUser();
+ } catch (IOException ie) {
+ LOG.info("Error getting UGI ", ie);
+ throw RPCUtil.getRemoteException(ie);
+ }
+
+ RMApp application = this.rmContext.getRMApps().get(applicationId);
+    // TODO: the application may be null (unknown id); guard against NPE.
+ if (!checkAccess(callerUGI, application.getUser(),
+ ApplicationACL.MODIFY_APP)) {
+ throw RPCUtil.getRemoteException(new AccessControlException("User "
+ + callerUGI.getShortUserName() + " cannot perform operation "
+ + ApplicationACL.MODIFY_APP.name() + " on " + applicationId));
+ }
+
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppEvent(applicationId, RMAppEventType.KILL));
+
+ FinishApplicationResponse response = recordFactory
+ .newRecordInstance(FinishApplicationResponse.class);
+ return response;
+ }
+
+ @Override
+ public GetClusterMetricsResponse getClusterMetrics(
+ GetClusterMetricsRequest request) throws YarnRemoteException {
+ GetClusterMetricsResponse response = recordFactory
+ .newRecordInstance(GetClusterMetricsResponse.class);
+ YarnClusterMetrics ymetrics = recordFactory
+ .newRecordInstance(YarnClusterMetrics.class);
+ ymetrics.setNumNodeManagers(this.rmContext.getRMNodes().size());
+ response.setClusterMetrics(ymetrics);
+ return response;
+ }
+
+ @Override
+ public GetAllApplicationsResponse getAllApplications(
+ GetAllApplicationsRequest request) throws YarnRemoteException {
+
+ List<ApplicationReport> reports = new ArrayList<ApplicationReport>();
+ for (RMApp application : this.rmContext.getRMApps().values()) {
+ reports.add(application.createAndGetApplicationReport());
+ }
+
+ GetAllApplicationsResponse response =
+ recordFactory.newRecordInstance(GetAllApplicationsResponse.class);
+ response.setApplicationList(reports);
+ return response;
+ }
+
+ @Override
+ public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request)
+ throws YarnRemoteException {
+ GetClusterNodesResponse response =
+ recordFactory.newRecordInstance(GetClusterNodesResponse.class);
+ Collection<RMNode> nodes = this.rmContext.getRMNodes().values();
+ List<NodeReport> nodeReports = new ArrayList<NodeReport>(nodes.size());
+ for (RMNode nodeInfo : nodes) {
+      nodeReports.add(createNodeReport(nodeInfo));
+ }
+ response.setNodeReports(nodeReports);
+ return response;
+ }
+
+ @Override
+ public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request)
+ throws YarnRemoteException {
+ GetQueueInfoResponse response =
+ recordFactory.newRecordInstance(GetQueueInfoResponse.class);
+ try {
+ QueueInfo queueInfo =
+ scheduler.getQueueInfo(request.getQueueName(),
+ request.getIncludeChildQueues(),
+ request.getRecursive());
+ List<ApplicationReport> appReports = EMPTY_APPS_REPORT;
+ if (request.getIncludeApplications()) {
+ Collection<RMApp> apps = this.rmContext.getRMApps().values();
+ appReports = new ArrayList<ApplicationReport>(
+ apps.size());
+ for (RMApp app : apps) {
+ appReports.add(app.createAndGetApplicationReport());
+ }
+ }
+ queueInfo.setApplications(appReports);
+ response.setQueueInfo(queueInfo);
+ } catch (IOException ioe) {
+ LOG.info("Failed to getQueueInfo for " + request.getQueueName(), ioe);
+ throw RPCUtil.getRemoteException(ioe);
+ }
+
+ return response;
+ }
+
+  private NodeReport createNodeReport(RMNode rmNode) {
+ NodeReport report = recordFactory.newRecordInstance(NodeReport.class);
+ report.setNodeId(rmNode.getNodeID());
+ report.setRackName(rmNode.getRackName());
+ report.setCapability(rmNode.getTotalCapability());
+ report.setNodeHealthStatus(rmNode.getNodeHealthStatus());
+    org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport
+        schedulerNodeReport = scheduler.getNodeReport(rmNode.getNodeID());
+ report.setUsed(schedulerNodeReport.getUsedResources());
+ report.setNumContainers(schedulerNodeReport.getNumContainers());
+ return report;
+ }
+
+ @Override
+ public GetQueueUserAclsInfoResponse getQueueUserAcls(
+ GetQueueUserAclsInfoRequest request) throws YarnRemoteException {
+ GetQueueUserAclsInfoResponse response =
+ recordFactory.newRecordInstance(GetQueueUserAclsInfoResponse.class);
+ response.setUserAclsInfoList(scheduler.getQueueUserAclInfo());
+ return response;
+ }
+
+ @Override
+ public void stop() {
+ if (this.server != null) {
+ this.server.close();
+ }
+ super.stop();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NMLivelinessMonitor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NMLivelinessMonitor.java
new file mode 100644
index 0000000..17e8d52
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NMLivelinessMonitor.java
@@ -0,0 +1,35 @@
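+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */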
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.SystemClock;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
+import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor;
+
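+/**
+ * Tracks NodeManager heartbeats and dispatches an RMNodeEventType.EXPIRE
+ * event for any node that has not reported within the configured expiry
+ * interval (RMConfig.NM_EXPIRY_INTERVAL).
+ */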
+public class NMLivelinessMonitor extends AbstractLivelinessMonitor<NodeId> {
+
+ private EventHandler dispatcher;
+
+ public NMLivelinessMonitor(Dispatcher d) {
+ super("NMLivelinessMonitor", new SystemClock());
+ this.dispatcher = d.getEventHandler();
+ }
+
+ public void init(Configuration conf) {
+ super.init(conf);
+ setExpireInterval(conf.getInt(RMConfig.NM_EXPIRY_INTERVAL,
+ RMConfig.DEFAULT_NM_EXPIRY_INTERVAL));
+ setMonitorInterval(conf.getInt(
+ RMConfig.NMLIVELINESS_MONITORING_INTERVAL,
+ RMConfig.DEFAULT_NMLIVELINESS_MONITORING_INTERVAL));
+ }
+
+ @Override
+ protected void expire(NodeId id) {
+ dispatcher.handle(
+ new RMNodeEvent(id, RMNodeEventType.EXPIRE));
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
new file mode 100644
index 0000000..0f87eeb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -0,0 +1,86 @@
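+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */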
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.HostsFileReader;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.service.AbstractService;
+
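+/**
+ * Manages the RM's include/exclude host lists: reads them from the files
+ * configured via RMConfig.RM_NODES_INCLUDE_FILE and
+ * RMConfig.RM_NODES_EXCLUDE_FILE, supports refreshing them at runtime, and
+ * decides whether a given host may register with the ResourceManager.
+ */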
+public class NodesListManager extends AbstractService {
+
+ private static final Log LOG = LogFactory.getLog(NodesListManager.class);
+
+ private HostsFileReader hostsReader;
+ private Configuration conf;
+
+ public NodesListManager() {
+ super(NodesListManager.class.getName());
+ }
+
+ @Override
+ public void init(Configuration conf) {
+
+ this.conf = conf;
+
+ // Read the hosts/exclude files to restrict access to the RM
+ try {
+ this.hostsReader =
+ new HostsFileReader(
+ conf.get(RMConfig.RM_NODES_INCLUDE_FILE,
+ RMConfig.DEFAULT_RM_NODES_INCLUDE_FILE),
+ conf.get(RMConfig.RM_NODES_EXCLUDE_FILE,
+ RMConfig.DEFAULT_RM_NODES_EXCLUDE_FILE)
+ );
+ printConfiguredHosts();
+ } catch (IOException ioe) {
+ LOG.warn("Failed to init hostsReader, disabling", ioe);
+ try {
+ this.hostsReader =
+ new HostsFileReader(RMConfig.DEFAULT_RM_NODES_INCLUDE_FILE,
+ RMConfig.DEFAULT_RM_NODES_EXCLUDE_FILE);
+ } catch (IOException ioe2) {
+ // Should *never* happen
+ this.hostsReader = null;
+ throw new YarnException(ioe2);
+ }
+ }
+ super.init(conf);
+ }
+
+ private void printConfiguredHosts() {
+ if (!LOG.isDebugEnabled()) {
+ return;
+ }
+
+ LOG.debug("hostsReader: in=" + conf.get(RMConfig.RM_NODES_INCLUDE_FILE,
+ RMConfig.DEFAULT_RM_NODES_INCLUDE_FILE) + " out=" +
+ conf.get(RMConfig.RM_NODES_EXCLUDE_FILE,
+ RMConfig.DEFAULT_RM_NODES_EXCLUDE_FILE));
+ for (String include : hostsReader.getHosts()) {
+ LOG.debug("include: " + include);
+ }
+ for (String exclude : hostsReader.getExcludedHosts()) {
+ LOG.debug("exclude: " + exclude);
+ }
+ }
+
+ public void refreshNodes() throws IOException {
+ synchronized (hostsReader) {
+ hostsReader.refresh();
+ printConfiguredHosts();
+ }
+ }
+
+ public boolean isValidNode(String hostName) {
+ synchronized (hostsReader) {
+ Set<String> hostsList = hostsReader.getHosts();
+ Set<String> excludeList = hostsReader.getExcludedHosts();
+ return ((hostsList.isEmpty() || hostsList.contains(hostName)) &&
+ !excludeList.contains(hostName));
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMConfig.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMConfig.java
new file mode 100644
index 0000000..bf0fbcf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMConfig.java
@@ -0,0 +1,88 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
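+/**
+ * Configuration keys and default values for the ResourceManager. All keys
+ * are rooted at YarnConfiguration.RM_PREFIX.
+ */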
+public class RMConfig {
+ public static final String RM_KEYTAB = YarnConfiguration.RM_PREFIX
+ + "keytab";
+ public static final String ZK_ADDRESS = YarnConfiguration.RM_PREFIX
+ + "zookeeper.address";
+ public static final String ZK_SESSION_TIMEOUT = YarnConfiguration.RM_PREFIX
+ + "zookeeper.session.timeout";
+ public static final String ADMIN_ADDRESS = YarnConfiguration.RM_PREFIX
+ + "admin.address";
+ public static final String AM_MAX_RETRIES = YarnConfiguration.RM_PREFIX
+ + "application.max.retries";
+ public static final int DEFAULT_ZK_TIMEOUT = 60000;
+ public static final int DEFAULT_AM_MAX_RETRIES = 3;
+ public static final int DEFAULT_AM_EXPIRY_INTERVAL = 600000;
+ public static final String NM_EXPIRY_INTERVAL = YarnConfiguration.RM_PREFIX
+ + "nodemanager.expiry.interval";
+ public static final int DEFAULT_NM_EXPIRY_INTERVAL = 600000;
+ public static final String DEFAULT_ADMIN_BIND_ADDRESS = "0.0.0.0:8141";
+ public static final String RESOURCE_SCHEDULER = YarnConfiguration.RM_PREFIX
+ + "scheduler";
+ public static final String RM_STORE = YarnConfiguration.RM_PREFIX + "store";
+ public static final String AMLIVELINESS_MONITORING_INTERVAL =
+ YarnConfiguration.RM_PREFIX
+ + "amliveliness-monitor.monitoring-interval";
+ public static final int DEFAULT_AMLIVELINESS_MONITORING_INTERVAL = 1000;
+  public static final String CONTAINER_LIVELINESS_MONITORING_INTERVAL
+      = YarnConfiguration.RM_PREFIX
+          + "containerliveliness-monitor.monitoring-interval";
+ public static final int DEFAULT_CONTAINER_LIVELINESS_MONITORING_INTERVAL = 600000;
+ public static final String NMLIVELINESS_MONITORING_INTERVAL =
+ YarnConfiguration.RM_PREFIX
+ + "nmliveliness-monitor.monitoring-interval";
+ public static final int DEFAULT_NMLIVELINESS_MONITORING_INTERVAL = 1000;
+
+ public static final String RM_RESOURCE_TRACKER_THREADS =
+ YarnConfiguration.RM_PREFIX + "resource.tracker.threads";
+ public static final int DEFAULT_RM_RESOURCE_TRACKER_THREADS = 10;
+
+ public static final String RM_CLIENT_THREADS =
+ YarnConfiguration.RM_PREFIX + "client.threads";
+ public static final int DEFAULT_RM_CLIENT_THREADS = 10;
+
+ public static final String RM_AM_THREADS =
+ YarnConfiguration.RM_PREFIX + "am.threads";
+ public static final int DEFAULT_RM_AM_THREADS = 10;
+
+ public static final String RM_ADMIN_THREADS =
+ YarnConfiguration.RM_PREFIX + "admin.threads";
+ public static final int DEFAULT_RM_ADMIN_THREADS = 1;
+
+  /* Key for enabling ACL checks on applications on the RM. */
+ public static final String RM_ACLS_ENABLED = YarnConfiguration.RM_PREFIX +
+ "acls.enabled";
+
+ public static final String RM_ADMIN_ACL =
+ YarnConfiguration.RM_PREFIX + "admin.acl";
+ public static final String DEFAULT_RM_ADMIN_ACL = "*";
+
+ public static final String RM_NODES_INCLUDE_FILE =
+ YarnConfiguration.RM_PREFIX + "nodes.include";
+ public static final String DEFAULT_RM_NODES_INCLUDE_FILE = "";
+
+ public static final String RM_NODES_EXCLUDE_FILE =
+ YarnConfiguration.RM_PREFIX + "nodes.exclude";
+ public static final String DEFAULT_RM_NODES_EXCLUDE_FILE = "";
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
new file mode 100644
index 0000000..d5a62a5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -0,0 +1,30 @@
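+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */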
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.NodeStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
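+/**
+ * Context shared by the ResourceManager components: the central dispatcher,
+ * the application and node stores, the maps of active applications and
+ * nodes, and the AM-liveliness and container-allocation monitors.
+ */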
+public interface RMContext {
+
+ Dispatcher getDispatcher();
+
+ NodeStore getNodeStore();
+
+ ApplicationsStore getApplicationsStore();
+
+ ConcurrentMap<ApplicationId, RMApp> getRMApps();
+
+ ConcurrentMap<NodeId, RMNode> getRMNodes();
+
+ AMLivelinessMonitor getAMLivelinessMonitor();
+
+ ContainerAllocationExpirer getContainerAllocationExpirer();
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
new file mode 100644
index 0000000..c4e6d34
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -0,0 +1,76 @@
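+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */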
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.NodeStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
+public class RMContextImpl implements RMContext {
+
+ private final Dispatcher rmDispatcher;
+ private final Store store;
+
+ private final ConcurrentMap<ApplicationId, RMApp> applications
+ = new ConcurrentHashMap<ApplicationId, RMApp>();
+
+ private final ConcurrentMap<NodeId, RMNode> nodes
+ = new ConcurrentHashMap<NodeId, RMNode>();
+
+ private AMLivelinessMonitor amLivelinessMonitor;
+ private ContainerAllocationExpirer containerAllocationExpirer;
+
+ public RMContextImpl(Store store, Dispatcher rmDispatcher,
+ ContainerAllocationExpirer containerAllocationExpirer,
+ AMLivelinessMonitor amLivelinessMonitor) {
+ this.store = store;
+ this.rmDispatcher = rmDispatcher;
+ this.containerAllocationExpirer = containerAllocationExpirer;
+ this.amLivelinessMonitor = amLivelinessMonitor;
+ }
+
+ @Override
+ public Dispatcher getDispatcher() {
+ return this.rmDispatcher;
+ }
+
+ @Override
+ public NodeStore getNodeStore() {
+ return store;
+ }
+
+ @Override
+ public ApplicationsStore getApplicationsStore() {
+ return store;
+ }
+
+ @Override
+ public ConcurrentMap<ApplicationId, RMApp> getRMApps() {
+ return this.applications;
+ }
+
+ @Override
+ public ConcurrentMap<NodeId, RMNode> getRMNodes() {
+ return this.nodes;
+ }
+
+ @Override
+ public ContainerAllocationExpirer getContainerAllocationExpirer() {
+ return this.containerAllocationExpirer;
+ }
+
+ @Override
+ public AMLivelinessMonitor getAMLivelinessMonitor() {
+ return this.amLivelinessMonitor;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
new file mode 100644
index 0000000..029e810
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -0,0 +1,497 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+
+import java.io.IOException;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.service.CompositeService;
+import org.apache.hadoop.yarn.service.Service;
+import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.WebApps;
+
+/**
+ * The ResourceManager is the central YARN daemon: a composite service that
+ * wires together the scheduler, the client service, the application master
+ * service, the resource tracker, and the admin service.
+ */
+public class ResourceManager extends CompositeService implements Recoverable {
+ private static final Log LOG = LogFactory.getLog(ResourceManager.class);
+ public static final long clusterTimeStamp = System.currentTimeMillis();
+ private YarnConfiguration conf;
+
+ protected ClientToAMSecretManager clientToAMSecretManager =
+ new ClientToAMSecretManager();
+
+ protected ContainerTokenSecretManager containerTokenSecretManager =
+ new ContainerTokenSecretManager();
+
+ protected ApplicationTokenSecretManager appTokenSecretManager =
+ new ApplicationTokenSecretManager();
+
+ private Dispatcher rmDispatcher;
+
+ protected ResourceScheduler scheduler;
+ private ClientRMService clientRM;
+ protected ApplicationMasterService masterService;
+ private ApplicationMasterLauncher applicationMasterLauncher;
+ private AdminService adminService;
+ private ContainerAllocationExpirer containerAllocationExpirer;
+ protected NMLivelinessMonitor nmLivelinessMonitor;
+ protected NodesListManager nodesListManager;
+ private SchedulerEventDispatcher schedulerDispatcher;
+
+ private final AtomicBoolean shutdown = new AtomicBoolean(false);
+ private WebApp webApp;
+ private RMContext rmContext;
+ private final Store store;
+ protected ResourceTrackerService resourceTracker;
+
+ public ResourceManager(Store store) {
+ super("ResourceManager");
+ this.store = store;
+ this.nodesListManager = new NodesListManager();
+ }
+
+ public RMContext getRMContext() {
+ return this.rmContext;
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+
+ this.rmDispatcher = new AsyncDispatcher();
+ addIfService(this.rmDispatcher);
+
+ this.containerAllocationExpirer = new ContainerAllocationExpirer(
+ this.rmDispatcher);
+ addService(this.containerAllocationExpirer);
+
+ AMLivelinessMonitor amLivelinessMonitor = createAMLivelinessMonitor();
+ addService(amLivelinessMonitor);
+
+ this.rmContext = new RMContextImpl(this.store, this.rmDispatcher,
+ this.containerAllocationExpirer, amLivelinessMonitor);
+
+ addService(nodesListManager);
+
+ // Initialize the config
+ this.conf = new YarnConfiguration(conf);
+ // Initialize the scheduler
+ this.scheduler = createScheduler();
+ this.schedulerDispatcher = new SchedulerEventDispatcher(this.scheduler);
+ addService(this.schedulerDispatcher);
+ this.rmDispatcher.register(SchedulerEventType.class,
+ this.schedulerDispatcher);
+
+ // Register event handler for RmAppEvents
+ this.rmDispatcher.register(RMAppEventType.class,
+ new ApplicationEventDispatcher(this.rmContext));
+
+ // Register event handler for RmAppAttemptEvents
+ this.rmDispatcher.register(RMAppAttemptEventType.class,
+ new ApplicationAttemptEventDispatcher(this.rmContext));
+
+ // Register event handler for RmNodes
+ this.rmDispatcher.register(RMNodeEventType.class,
+ new NodeEventDispatcher(this.rmContext));
+
+    // TODO: change this to be random
+ this.appTokenSecretManager.setMasterKey(ApplicationTokenSecretManager
+ .createSecretKey("Dummy".getBytes()));
+
+ this.nmLivelinessMonitor = createNMLivelinessMonitor();
+ addService(this.nmLivelinessMonitor);
+
+ this.resourceTracker = createResourceTrackerService();
+ addService(resourceTracker);
+
+ try {
+ this.scheduler.reinitialize(this.conf,
+ this.containerTokenSecretManager, this.rmContext);
+ } catch (IOException ioe) {
+ throw new RuntimeException("Failed to initialize scheduler", ioe);
+ }
+
+ masterService = createApplicationMasterService();
+    addService(masterService);
+
+ clientRM = createClientRMService();
+ addService(clientRM);
+
+ adminService = createAdminService();
+ addService(adminService);
+
+ this.applicationMasterLauncher = createAMLauncher();
+ addService(applicationMasterLauncher);
+
+ super.init(conf);
+ }
+
+ protected void addIfService(Object object) {
+ if (object instanceof Service) {
+ addService((Service) object);
+ }
+ }
+
+ protected ResourceScheduler createScheduler() {
+ return
+ ReflectionUtils.newInstance(
+ conf.getClass(RMConfig.RESOURCE_SCHEDULER,
+ FifoScheduler.class, ResourceScheduler.class),
+ this.conf);
+ }
+
+ protected ApplicationMasterLauncher createAMLauncher() {
+ return new ApplicationMasterLauncher(
+ this.appTokenSecretManager, this.clientToAMSecretManager,
+ this.rmContext);
+ }
+
+ private NMLivelinessMonitor createNMLivelinessMonitor() {
+ return new NMLivelinessMonitor(this.rmContext
+ .getDispatcher());
+ }
+
+ protected AMLivelinessMonitor createAMLivelinessMonitor() {
+ return new AMLivelinessMonitor(this.rmDispatcher);
+ }
+
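+  /**
+   * Forwards scheduler events to the scheduler on a dedicated thread,
+   * decoupling event producers from scheduler processing. A failure while
+   * handling an event currently stops the processing thread (see the TODOs
+   * below about killing the RM in that case).
+   */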
+ @Private
+ public static final class SchedulerEventDispatcher extends AbstractService
+ implements EventHandler<SchedulerEvent> {
+
+ private final ResourceScheduler scheduler;
+ private final BlockingQueue<SchedulerEvent> eventQueue =
+ new LinkedBlockingQueue<SchedulerEvent>();
+ private final Thread eventProcessor;
+
+ public SchedulerEventDispatcher(ResourceScheduler scheduler) {
+ super(SchedulerEventDispatcher.class.getName());
+ this.scheduler = scheduler;
+ this.eventProcessor = new Thread(new EventProcessor());
+ }
+
+ @Override
+ public synchronized void start() {
+ this.eventProcessor.start();
+ super.start();
+ }
+
+ private final class EventProcessor implements Runnable {
+ @Override
+ public void run() {
+
+ SchedulerEvent event;
+
+ while (!Thread.currentThread().isInterrupted()) {
+ try {
+ event = eventQueue.take();
+ } catch (InterruptedException e) {
+ LOG.error("Returning, interrupted : " + e);
+ return; // TODO: Kill RM.
+ }
+
+ try {
+ scheduler.handle(event);
+ } catch (Throwable t) {
+ LOG.error("Error in handling event type " + event.getType()
+ + " to the scheduler", t);
+ return; // TODO: Kill RM.
+ }
+ }
+ }
+ }
+
+ @Override
+ public synchronized void stop() {
+ this.eventProcessor.interrupt();
+ try {
+ this.eventProcessor.join();
+ } catch (InterruptedException e) {
+ throw new YarnException(e);
+ }
+ super.stop();
+ }
+
+ @Override
+ public void handle(SchedulerEvent event) {
+ try {
+ int qSize = eventQueue.size();
+        if (qSize != 0 && qSize % 1000 == 0) {
+ LOG.info("Size of scheduler event-queue is " + qSize);
+ }
+ int remCapacity = eventQueue.remainingCapacity();
+ if (remCapacity < 1000) {
+ LOG.info("Very low remaining capacity on scheduler event queue: "
+ + remCapacity);
+ }
+ this.eventQueue.put(event);
+ } catch (InterruptedException e) {
+ throw new YarnException(e);
+ }
+ }
+ }
+
+ @Private
+ public static final class ApplicationEventDispatcher implements
+ EventHandler<RMAppEvent> {
+
+ private final RMContext rmContext;
+
+ public ApplicationEventDispatcher(RMContext rmContext) {
+ this.rmContext = rmContext;
+ }
+
+ @Override
+ public void handle(RMAppEvent event) {
+ ApplicationId appID = event.getApplicationId();
+ RMApp rmApp = this.rmContext.getRMApps().get(appID);
+ if (rmApp != null) {
+ try {
+ rmApp.handle(event);
+ } catch (Throwable t) {
+ LOG.error("Error in handling event type " + event.getType()
+ + " for application " + appID, t);
+ }
+ }
+ }
+ }
+
+ @Private
+ public static final class ApplicationAttemptEventDispatcher implements
+ EventHandler<RMAppAttemptEvent> {
+
+ private final RMContext rmContext;
+
+ public ApplicationAttemptEventDispatcher(RMContext rmContext) {
+ this.rmContext = rmContext;
+ }
+
+ @Override
+ public void handle(RMAppAttemptEvent event) {
+ ApplicationAttemptId appAttemptID = event.getApplicationAttemptId();
+      ApplicationId appId = appAttemptID.getApplicationId();
+      RMApp rmApp = this.rmContext.getRMApps().get(appId);
+ if (rmApp != null) {
+ RMAppAttempt rmAppAttempt = rmApp.getRMAppAttempt(appAttemptID);
+ if (rmAppAttempt != null) {
+ try {
+ rmAppAttempt.handle(event);
+ } catch (Throwable t) {
+ LOG.error("Error in handling event type " + event.getType()
+ + " for applicationAttempt " + appAttemptId, t);
+ }
+ }
+ }
+ }
+ }
+
+ @Private
+ public static final class NodeEventDispatcher implements
+ EventHandler<RMNodeEvent> {
+
+ private final RMContext rmContext;
+
+ public NodeEventDispatcher(RMContext rmContext) {
+ this.rmContext = rmContext;
+ }
+
+ @Override
+ public void handle(RMNodeEvent event) {
+ NodeId nodeId = event.getNodeId();
+ RMNode node = this.rmContext.getRMNodes().get(nodeId);
+ if (node != null) {
+ try {
+ ((EventHandler<RMNodeEvent>) node).handle(event);
+ } catch (Throwable t) {
+ LOG.error("Error in handling event type " + event.getType()
+ + " for node " + nodeId, t);
+ }
+ }
+ }
+ }
+
+ protected void startWepApp() {
+ webApp = WebApps.$for("yarn", masterService).at(
+ conf.get(YarnConfiguration.RM_WEBAPP_BIND_ADDRESS,
+ YarnConfiguration.DEFAULT_RM_WEBAPP_BIND_ADDRESS)).
+ start(new RMWebApp(this));
+
+ }
+
+ @Override
+ public void start() {
+ try {
+ doSecureLogin();
+ } catch(IOException ie) {
+ throw new YarnException("Failed to login", ie);
+ }
+
+ startWepApp();
+ DefaultMetricsSystem.initialize("ResourceManager");
+
+ super.start();
+
+ /*synchronized(shutdown) {
+ try {
+ while(!shutdown.get()) {
+ shutdown.wait();
+ }
+ } catch(InterruptedException ie) {
+ LOG.info("Interrupted while waiting", ie);
+ }
+ }*/
+ }
+
+ protected void doSecureLogin() throws IOException {
+ SecurityUtil.login(conf, RMConfig.RM_KEYTAB,
+ YarnConfiguration.RM_SERVER_PRINCIPAL_KEY);
+ }
+
+ @Override
+ public void stop() {
+ if (webApp != null) {
+ webApp.stop();
+ }
+
+ /*synchronized(shutdown) {
+ shutdown.set(true);
+ shutdown.notifyAll();
+ }*/
+
+ DefaultMetricsSystem.shutdown();
+
+ super.stop();
+ }
+
+ protected ResourceTrackerService createResourceTrackerService() {
+ return new ResourceTrackerService(this.rmContext, this.nodesListManager,
+ this.nmLivelinessMonitor, this.containerTokenSecretManager);
+ }
+
+ protected ClientRMService createClientRMService() {
+ return new ClientRMService(this.rmContext, this.clientToAMSecretManager,
+ scheduler, masterService);
+ }
+
+ protected ApplicationMasterService createApplicationMasterService() {
+ return new ApplicationMasterService(this.rmContext,
+ this.appTokenSecretManager, scheduler);
+ }
+
+
+ protected AdminService createAdminService() {
+ return new AdminService(conf, scheduler, rmContext, this.nodesListManager);
+ }
+
+ @Private
+ public ClientRMService getClientRMService() {
+ return this.clientRM;
+ }
+
+ /**
+ * Return the scheduler.
+ * @return the {@link ResourceScheduler} in use by this ResourceManager.
+ */
+ @Private
+ public ResourceScheduler getResourceScheduler() {
+ return this.scheduler;
+ }
+
+ /**
+ * Return the resource tracking component.
+ * @return the {@link ResourceTrackerService} in use by this ResourceManager.
+ */
+ @Private
+ public ResourceTrackerService getResourceTrackerService() {
+ return this.resourceTracker;
+ }
+
+ @Private
+ public ApplicationMasterService getApplicationMasterService() {
+ return this.masterService;
+ }
+
+ @Override
+ public void recover(RMState state) throws Exception {
+ resourceTracker.recover(state);
+ scheduler.recover(state);
+ }
+
+ public static void main(String[] argv) {
+ ResourceManager resourceManager = null;
+ try {
+ Configuration conf = new YarnConfiguration();
+ Store store = StoreFactory.getStore(conf);
+ resourceManager = new ResourceManager(store);
+ resourceManager.init(conf);
+ //resourceManager.recover(store.restore());
+ //store.doneWithRecovery();
+ resourceManager.start();
+ } catch (Throwable e) {
+ LOG.error("Error starting RM", e);
+ if (resourceManager != null) {
+ resourceManager.stop();
+ }
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
new file mode 100644
index 0000000..cfab347
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import javax.crypto.SecretKey;
+
+import org.apache.avro.ipc.Server;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.server.RMNMSecurityInfoClass;
+import org.apache.hadoop.yarn.server.YarnServerConfig;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.RackResolver;
+
+public class ResourceTrackerService extends AbstractService implements
+ ResourceTracker {
+
+ private static final Log LOG = LogFactory.getLog(ResourceTrackerService.class);
+
+ private static final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ private final RMContext rmContext;
+ private final NodesListManager nodesListManager;
+ private final NMLivelinessMonitor nmLivelinessMonitor;
+ private final ContainerTokenSecretManager containerTokenSecretManager;
+
+ private Server server;
+ private InetSocketAddress resourceTrackerAddress;
+
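+ // A single, immutable "please reboot" response, built once and shared
+ // across all heartbeats from nodes that are unknown or too far behind.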
+ private static final NodeHeartbeatResponse reboot = recordFactory
+ .newRecordInstance(NodeHeartbeatResponse.class);
+ static {
+ HeartbeatResponse rebootResp = recordFactory
+ .newRecordInstance(HeartbeatResponse.class);
+ rebootResp.setReboot(true);
+ reboot.setHeartbeatResponse(rebootResp);
+ }
+
+ public ResourceTrackerService(RMContext rmContext,
+ NodesListManager nodesListManager,
+ NMLivelinessMonitor nmLivelinessMonitor,
+ ContainerTokenSecretManager containerTokenSecretManager) {
+ super(ResourceTrackerService.class.getName());
+ this.rmContext = rmContext;
+ this.nodesListManager = nodesListManager;
+ this.nmLivelinessMonitor = nmLivelinessMonitor;
+ this.containerTokenSecretManager = containerTokenSecretManager;
+ }
+
+ @Override
+ public synchronized void init(Configuration conf) {
+ String resourceTrackerBindAddress =
+ conf.get(YarnServerConfig.RESOURCETRACKER_ADDRESS,
+ YarnServerConfig.DEFAULT_RESOURCETRACKER_BIND_ADDRESS);
+ resourceTrackerAddress = NetUtils.createSocketAddr(resourceTrackerBindAddress);
+
+ RackResolver.init(conf);
+ super.init(conf);
+ }
+
+ @Override
+ public synchronized void start() {
+ super.start();
+ // ResourceTrackerServer authenticates NodeManager via Kerberos if
+ // security is enabled, so no secretManager.
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ Configuration rtServerConf = new Configuration(getConfig());
+ rtServerConf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ RMNMSecurityInfoClass.class, SecurityInfo.class);
+ this.server =
+ rpc.getServer(ResourceTracker.class, this, resourceTrackerAddress,
+ rtServerConf, null,
+ rtServerConf.getInt(RMConfig.RM_RESOURCE_TRACKER_THREADS,
+ RMConfig.DEFAULT_RM_RESOURCE_TRACKER_THREADS));
+ this.server.start();
+
+ }
+
+ @Override
+ public synchronized void stop() {
+ if (this.server != null) {
+ this.server.close();
+ }
+ super.stop();
+ }
+
+ @Override
+ public RegisterNodeManagerResponse registerNodeManager(
+ RegisterNodeManagerRequest request) throws YarnRemoteException {
+
+ NodeId nodeId = request.getNodeId();
+ String host = nodeId.getHost();
+ int cmPort = nodeId.getPort();
+ int httpPort = request.getHttpPort();
+ Resource capability = request.getResource();
+
+ try {
+ // Check if this node is a 'valid' node
+ if (!this.nodesListManager.isValidNode(host)) {
+ LOG.info("Disallowed NodeManager from " + host);
+ throw new IOException("Disallowed NodeManager from " + host);
+ }
+
+ RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort,
+ httpPort, resolve(host), capability);
+
+ if (this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode) != null) {
+ throw new IOException("Duplicate registration from the node!");
+ }
+
+ this.nmLivelinessMonitor.register(nodeId);
+
+ LOG.info("NodeManager from node " + host +
+ "(cmPort: " + cmPort + " httpPort: " + httpPort + ") "
+ + "registered with capability: " + capability.getMemory()
+ + ", assigned nodeId " + nodeId);
+
+ RegistrationResponse regResponse = recordFactory.newRecordInstance(
+ RegistrationResponse.class);
+ SecretKey secretKey = this.containerTokenSecretManager
+ .createAndGetSecretKey(nodeId.toString());
+ regResponse.setSecretKey(ByteBuffer.wrap(secretKey.getEncoded()));
+
+ RegisterNodeManagerResponse response = recordFactory
+ .newRecordInstance(RegisterNodeManagerResponse.class);
+ response.setRegistrationResponse(regResponse);
+ return response;
+ } catch (IOException ioe) {
+ LOG.info("Exception in node registration from " + nodeId.getHost(), ioe);
+ throw RPCUtil.getRemoteException(ioe);
+ }
+ }
+
+ @Override
+ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
+ throws YarnRemoteException {
+
+ NodeStatus remoteNodeStatus = request.getNodeStatus();
+ try {
+ /*
+ * Here is the node heartbeat sequence...
+ * 1. Check if it's a registered node
+ * 2. Check if it's a valid (i.e. not excluded) node
+ * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
+ * 4. Send healthStatus to RMNode
+ */
+
+ NodeId nodeId = remoteNodeStatus.getNodeId();
+
+ // 1. Check if it's a registered node
+ RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
+ if (rmNode == null) {
+ /* node does not exist */
+ LOG.info("Node not found rebooting " + remoteNodeStatus.getNodeId());
+ return reboot;
+ }
+
+ // Send ping
+ this.nmLivelinessMonitor.receivedPing(nodeId);
+
+ // 2. Check if it's a valid (i.e. not excluded) node
+ if (!this.nodesListManager.isValidNode(rmNode.getHostName())) {
+ LOG.info("Disallowed NodeManager nodeId: " + nodeId +
+ " hostname: " + rmNode.getNodeAddress());
+ throw new IOException("Disallowed NodeManager nodeId: " +
+ remoteNodeStatus.getNodeId());
+ }
+
+ NodeHeartbeatResponse nodeHeartBeatResponse = recordFactory
+ .newRecordInstance(NodeHeartbeatResponse.class);
+
+ // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
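+ // The NM reports the id of the last response it saw. If that is exactly
+ // one behind our last response, the NM missed our reply and re-sent the
+ // same heartbeat, so we just re-send the previous response. If it is
+ // further behind, the node is out of sync and is told to reboot.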
+ HeartbeatResponse lastHeartbeatResponse = rmNode
+ .getLastHeartBeatResponse();
+ if (remoteNodeStatus.getResponseId() + 1 == lastHeartbeatResponse
+ .getResponseId()) {
+ LOG.info("Received duplicate heartbeat from node " +
+ rmNode.getNodeAddress());
+ nodeHeartBeatResponse.setHeartbeatResponse(lastHeartbeatResponse);
+ return nodeHeartBeatResponse;
+ } else if (remoteNodeStatus.getResponseId() + 1 < lastHeartbeatResponse
+ .getResponseId()) {
+ LOG.info("Too far behind rm response id:" +
+ lastHeartbeatResponse.getResponseId() + " nm response id:"
+ + remoteNodeStatus.getResponseId());
+ // TODO: Just sending reboot is not enough. Think more.
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
+ return reboot;
+ }
+
+ // Heartbeat response
+ HeartbeatResponse latestResponse = recordFactory
+ .newRecordInstance(HeartbeatResponse.class);
+ latestResponse
+ .setResponseId(lastHeartbeatResponse.getResponseId() + 1);
+ latestResponse.addAllContainersToCleanup(rmNode.pullContainersToCleanUp());
+ latestResponse.addAllApplicationsToCleanup(rmNode.pullAppsToCleanup());
+
+ // 4. Send status to RMNode, saving the latest response.
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new RMNodeStatusEvent(nodeId, remoteNodeStatus.getNodeHealthStatus(),
+ remoteNodeStatus.getAllContainers(), latestResponse));
+
+ nodeHeartBeatResponse.setHeartbeatResponse(latestResponse);
+ return nodeHeartBeatResponse;
+ } catch (IOException ioe) {
+ LOG.info("Exception in heartbeat from node " +
+ request.getNodeStatus().getNodeId(), ioe);
+ throw RPCUtil.getRemoteException(ioe);
+ }
+ }
+
+ public void recover(RMState state) {
+//
+// List<RMNode> nodeManagers = state.getStoredNodeManagers();
+// for (RMNode nm : nodeManagers) {
+// createNewNode(nm.getNodeID(), nm.getNodeHostName(), nm
+// .getCommandPort(), nm.getHttpPort(), nm.getNode(), nm
+// .getTotalCapability());
+// }
+// for (Map.Entry<ApplicationId, ApplicationInfo> entry : state
+// .getStoredApplications().entrySet()) {
+// List<Container> containers = entry.getValue().getContainers();
+// List<Container> containersToAdd = new ArrayList<Container>();
+// for (Container c : containers) {
+// RMNode containerNode = this.rmContext.getNodesCollection()
+// .getNodeInfo(c.getNodeId());
+// containersToAdd.add(c);
+// containerNode.allocateContainer(entry.getKey(), containersToAdd);
+// containersToAdd.clear();
+// }
+// }
+ }
+
+ /**
+ * Resolve a node's location in the network topology.
+ * @param hostName the hostname of this node.
+ * @return the resolved {@link Node} for this nodemanager.
+ */
+ public static Node resolve(String hostName) {
+ return RackResolver.resolve(hostName);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
new file mode 100644
index 0000000..6eae783
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -0,0 +1,289 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.amlauncher;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.crypto.SecretKey;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.DataInputByteBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
+
+/**
+ * The launch of the AM itself.
+ */
+public class AMLauncher implements Runnable {
+
+ private static final Log LOG = LogFactory.getLog(AMLauncher.class);
+
+ private ContainerManager containerMgrProxy;
+
+ private final RMAppAttempt application;
+ private final Configuration conf;
+ private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ private final ApplicationTokenSecretManager applicationTokenSecretManager;
+ private final ClientToAMSecretManager clientToAMSecretManager;
+ private final AMLauncherEventType eventType;
+
+ @SuppressWarnings("rawtypes")
+ private final EventHandler handler;
+
+ @SuppressWarnings("unchecked")
+ public AMLauncher(RMContext rmContext, RMAppAttempt application,
+ AMLauncherEventType eventType,
+ ApplicationTokenSecretManager applicationTokenSecretManager,
+ ClientToAMSecretManager clientToAMSecretManager, Configuration conf) {
+ this.application = application;
+ this.conf = new Configuration(conf); // Just not to touch the sec-info class
+ this.applicationTokenSecretManager = applicationTokenSecretManager;
+ this.clientToAMSecretManager = clientToAMSecretManager;
+ this.conf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ ContainerManagerSecurityInfo.class, SecurityInfo.class);
+ this.eventType = eventType;
+ this.handler = rmContext.getDispatcher().getEventHandler();
+ }
+
+ private void connect() throws IOException {
+ ContainerId masterContainerID = application.getMasterContainer().getId();
+
+ containerMgrProxy =
+ getContainerMgrProxy(masterContainerID.getAppId());
+ }
+
+ private void launch() throws IOException {
+ connect();
+ ContainerId masterContainerID = application.getMasterContainer().getId();
+ ApplicationSubmissionContext applicationContext =
+ application.getSubmissionContext();
+ LOG.info("Setting up container " + application.getMasterContainer()
+ + " for AM " + application.getAppAttemptId());
+ ContainerLaunchContext launchContext =
+ createAMContainerLaunchContext(applicationContext, masterContainerID);
+ StartContainerRequest request = recordFactory.newRecordInstance(StartContainerRequest.class);
+ request.setContainerLaunchContext(launchContext);
+ containerMgrProxy.startContainer(request);
+ LOG.info("Done launching container " + application.getMasterContainer()
+ + " for AM " + application.getAppAttemptId());
+ }
+
+ private void cleanup() throws IOException {
+ connect();
+ ContainerId containerId = application.getMasterContainer().getId();
+ StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
+ stopRequest.setContainerId(containerId);
+ containerMgrProxy.stopContainer(stopRequest);
+ }
+
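+ // Builds an RPC proxy to the NodeManager hosting the AM container. With
+ // security enabled, the container token is attached to the UGI so the
+ // NM-side ContainerManager accepts the start/stop calls.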
+ private ContainerManager getContainerMgrProxy(
+ final ApplicationId applicationID) throws IOException {
+
+ Container container = application.getMasterContainer();
+
+ final String containerManagerBindAddress = container.getNodeId().toString();
+
+ final YarnRPC rpc = YarnRPC.create(conf); // TODO: Don't create again and again.
+
+ UserGroupInformation currentUser =
+ UserGroupInformation.createRemoteUser("TODO"); // TODO
+ if (UserGroupInformation.isSecurityEnabled()) {
+ ContainerToken containerToken = container.getContainerToken();
+ Token<ContainerTokenIdentifier> token =
+ new Token<ContainerTokenIdentifier>(
+ containerToken.getIdentifier().array(),
+ containerToken.getPassword().array(), new Text(
+ containerToken.getKind()), new Text(
+ containerToken.getService()));
+ currentUser.addToken(token);
+ }
+ return currentUser.doAs(new PrivilegedAction<ContainerManager>() {
+ @Override
+ public ContainerManager run() {
+ return (ContainerManager) rpc.getProxy(ContainerManager.class,
+ NetUtils.createSocketAddr(containerManagerBindAddress), conf);
+ }
+ });
+ }
+
+ private ContainerLaunchContext createAMContainerLaunchContext(
+ ApplicationSubmissionContext applicationMasterContext,
+ ContainerId containerID) throws IOException {
+
+ // Construct the actual Container
+ ContainerLaunchContext container = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ container.addAllCommands(applicationMasterContext.getCommandList());
+ StringBuilder mergedCommand = new StringBuilder();
+ String failCount = Integer.toString(application.getAppAttemptId()
+ .getAttemptId());
+ List<String> commandList = new ArrayList<String>();
+ for (String str : container.getCommandList()) {
+ // This is outright wrong; the AM fail count should be passed via env.
+ String result =
+ str.replaceFirst(ApplicationConstants.AM_FAIL_COUNT_STRING,
+ failCount);
+ mergedCommand.append(result).append(" ");
+ commandList.add(result);
+ }
+ container.clearCommands();
+ container.addAllCommands(commandList);
+ // The AM attempt/fail count was substituted into the command line above.
+
+ LOG.info("Command to launch container " +
+ containerID + " : " + mergedCommand);
+ container.addAllEnv(applicationMasterContext.getAllEnvironment());
+
+ container.addAllEnv(setupTokensInEnv(applicationMasterContext));
+
+ // Fill in the remaining fields of the container launch context
+ container.setContainerId(containerID);
+ container.setUser(applicationMasterContext.getUser());
+ container.setResource(applicationMasterContext.getMasterCapability());
+ container.addAllLocalResources(applicationMasterContext.getAllResourcesTodo());
+ container.setContainerTokens(applicationMasterContext.getFsTokensTodo());
+ return container;
+ }
+
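+ // With security enabled, puts the AM's ApplicationToken and the
+ // client secret-key into the AM's environment, and also folds the AM
+ // token into the application's credential blob.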
+ private Map<String, String> setupTokensInEnv(
+ ApplicationSubmissionContext asc)
+ throws IOException {
+ Map<String, String> env =
+ new HashMap<String, String>();
+ if (UserGroupInformation.isSecurityEnabled()) {
+ // TODO: Security enabled/disabled info should come from RM.
+
+ Credentials credentials = new Credentials();
+
+ DataInputByteBuffer dibb = new DataInputByteBuffer();
+ if (asc.getFsTokensTodo() != null) {
+ // TODO: Don't do this kind of checks everywhere.
+ dibb.reset(asc.getFsTokensTodo());
+ credentials.readTokenStorageStream(dibb);
+ }
+
+ ApplicationTokenIdentifier id = new ApplicationTokenIdentifier(
+ application.getAppAttemptId().getApplicationId());
+ Token<ApplicationTokenIdentifier> token =
+ new Token<ApplicationTokenIdentifier>(id,
+ this.applicationTokenSecretManager);
+ String schedulerAddressStr =
+ this.conf.get(YarnConfiguration.SCHEDULER_ADDRESS,
+ YarnConfiguration.DEFAULT_SCHEDULER_BIND_ADDRESS);
+ InetSocketAddress unresolvedAddr =
+ NetUtils.createSocketAddr(schedulerAddressStr);
+ String resolvedAddr =
+ unresolvedAddr.getAddress().getHostAddress() + ":"
+ + unresolvedAddr.getPort();
+ token.setService(new Text(resolvedAddr));
+ String appMasterTokenEncoded = token.encodeToUrlString();
+ LOG.debug("Putting appMaster token in env : " + appMasterTokenEncoded);
+ env.put(ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME,
+ appMasterTokenEncoded);
+
+ // Add the RM token
+ credentials.addToken(new Text(resolvedAddr), token);
+ DataOutputBuffer dob = new DataOutputBuffer();
+ credentials.writeTokenStorageToStream(dob);
+ asc.setFsTokensTodo(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
+
+ ApplicationTokenIdentifier identifier = new ApplicationTokenIdentifier(
+ application.getAppAttemptId().getApplicationId());
+ SecretKey clientSecretKey =
+ this.clientToAMSecretManager.getMasterKey(identifier);
+ String encoded =
+ Base64.encodeBase64URLSafeString(clientSecretKey.getEncoded());
+ LOG.debug("The encoded client secret-key to be put in env : " + encoded);
+ env.put(ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME, encoded);
+ }
+ return env;
+ }
+
+ @SuppressWarnings("unchecked")
+ public void run() {
+ switch (eventType) {
+ case LAUNCH:
+ try {
+ LOG.info("Launching master" + application.getAppAttemptId());
+ launch();
+ handler.handle(new RMAppAttemptEvent(application.getAppAttemptId(),
+ RMAppAttemptEventType.LAUNCHED));
+ } catch(Exception ie) {
+ String message = "Error launching " + application.getAppAttemptId()
+ + ". Got exception: " + StringUtils.stringifyException(ie);
+ LOG.info(message);
+ handler.handle(new RMAppAttemptLaunchFailedEvent(application
+ .getAppAttemptId(), message));
+ }
+ break;
+ case CLEANUP:
+ try {
+ LOG.info("Cleaning master " + application.getAppAttemptId());
+ cleanup();
+ } catch(IOException ie) {
+ LOG.info("Error cleaning master ", ie);
+ }
+ break;
+ default:
+ LOG.warn("Received unknown event-type " + eventType + ". Ignoring.");
+ break;
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncherEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncherEvent.java
new file mode 100644
index 0000000..b625302
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncherEvent.java
@@ -0,0 +1,19 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.amlauncher;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+
+public class AMLauncherEvent extends AbstractEvent<AMLauncherEventType> {
+
+ private final RMAppAttempt appAttempt;
+
+ public AMLauncherEvent(AMLauncherEventType type, RMAppAttempt appAttempt) {
+ super(type);
+ this.appAttempt = appAttempt;
+ }
+
+ public RMAppAttempt getAppAttempt() {
+ return this.appAttempt;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncherEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncherEventType.java
new file mode 100644
index 0000000..abc2269
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncherEventType.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.amlauncher;
+
+public enum AMLauncherEventType {
+ LAUNCH,
+ CLEANUP
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
new file mode 100644
index 0000000..5f12348
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
@@ -0,0 +1,128 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.amlauncher;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+
+public class ApplicationMasterLauncher extends AbstractService implements
+ EventHandler<AMLauncherEvent> {
+ private static final Log LOG = LogFactory.getLog(
+ ApplicationMasterLauncher.class);
+ private final ThreadPoolExecutor launcherPool;
+ private final EventHandler handler;
+ private LauncherThread launcherHandlingThread;
+
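+ // Pending launch/cleanup work, produced by handle() and drained by the
+ // single LauncherThread into the launcher thread pool.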
+ private final BlockingQueue<Runnable> masterEvents
+ = new LinkedBlockingQueue<Runnable>();
+
+ private ApplicationTokenSecretManager applicationTokenSecretManager;
+ private ClientToAMSecretManager clientToAMSecretManager;
+ private final RMContext context;
+
+ public ApplicationMasterLauncher(ApplicationTokenSecretManager
+ applicationTokenSecretManager, ClientToAMSecretManager clientToAMSecretManager,
+ RMContext context) {
+ super(ApplicationMasterLauncher.class.getName());
+ this.context = context;
+ this.handler = context.getDispatcher().getEventHandler();
+ /* register to dispatcher */
+ this.context.getDispatcher().register(AMLauncherEventType.class, this);
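+ // Note: with an unbounded work queue the pool never grows past its core
+ // size of one thread; extra AM launches simply wait in the queue.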
+ this.launcherPool = new ThreadPoolExecutor(1, 10, 1,
+ TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
+ this.launcherHandlingThread = new LauncherThread();
+ this.applicationTokenSecretManager = applicationTokenSecretManager;
+ this.clientToAMSecretManager = clientToAMSecretManager;
+ }
+
+ public void start() {
+ launcherHandlingThread.start();
+ super.start();
+ }
+
+ protected Runnable createRunnableLauncher(RMAppAttempt application, AMLauncherEventType event) {
+ Runnable launcher = new AMLauncher(context, application, event,
+ applicationTokenSecretManager, clientToAMSecretManager, getConfig());
+ return launcher;
+ }
+
+ private void launch(RMAppAttempt application) {
+ Runnable launcher = createRunnableLauncher(application, AMLauncherEventType.LAUNCH);
+ masterEvents.add(launcher);
+ }
+
+
+ public void stop() {
+ launcherHandlingThread.interrupt();
+ try {
+ launcherHandlingThread.join();
+ } catch (InterruptedException ie) {
+ LOG.info(launcherHandlingThread.getName() + " interrupted during join ",
+ ie);
+ }
+ launcherPool.shutdown();
+ super.stop();
+ }
+
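+ // Forwards queued launcher Runnables to the thread pool until interrupted.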
+ private class LauncherThread extends Thread {
+ @Override
+ public void run() {
+ while (!this.isInterrupted()) {
+ Runnable toLaunch;
+ try {
+ toLaunch = masterEvents.take();
+ launcherPool.execute(toLaunch);
+ } catch (InterruptedException e) {
+ LOG.warn(this.getClass().getName() + " interrupted. Returning.");
+ return;
+ }
+ }
+ }
+ }
+
+ private void cleanup(RMAppAttempt application) {
+ Runnable launcher = createRunnableLauncher(application, AMLauncherEventType.CLEANUP);
+ masterEvents.add(launcher);
+ }
+
+ @Override
+ public synchronized void handle(AMLauncherEvent appEvent) {
+ AMLauncherEventType event = appEvent.getType();
+ RMAppAttempt application = appEvent.getAppAttempt();
+ switch (event) {
+ case LAUNCH:
+ launch(application);
+ break;
+ case CLEANUP:
+ cleanup(application);
+ break;
+ default:
+ break;
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocol.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocol.java
new file mode 100644
index 0000000..c1578a6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocol.java
@@ -0,0 +1,52 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+
+public interface RMAdminProtocol {
+ public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
+ throws YarnRemoteException;
+
+ public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
+ throws YarnRemoteException;
+
+ public RefreshSuperUserGroupsConfigurationResponse
+ refreshSuperUserGroupsConfiguration(
+ RefreshSuperUserGroupsConfigurationRequest request)
+ throws YarnRemoteException;
+
+ public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
+ RefreshUserToGroupsMappingsRequest request)
+ throws YarnRemoteException;
+
+ public RefreshAdminAclsResponse refreshAdminAcls(
+ RefreshAdminAclsRequest request)
+ throws YarnRemoteException;
+}
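+
+// Illustrative sketch only (not part of this patch): an admin client could
+// drive this protocol through the PB client in the next file, assuming the
+// RM admin service address is in `addr` and records come from the usual
+// record factory:
+//
+//   RMAdminProtocol admin = new RMAdminProtocolPBClientImpl(1, addr, conf);
+//   RefreshQueuesRequest req =
+//       recordFactory.newRecordInstance(RefreshQueuesRequest.class);
+//   admin.refreshQueues(req);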
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
new file mode 100644
index 0000000..6e8ca26
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
@@ -0,0 +1,169 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.impl.pb.client;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
+import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
+
+import com.google.protobuf.ServiceException;
+
+
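+// Client-side adapter: wraps the protobuf blocking stub, translates each
+// record to/from its PB form, and unwraps YarnRemoteException causes from
+// the ServiceException thrown by the RPC layer.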
+public class RMAdminProtocolPBClientImpl implements RMAdminProtocol {
+
+ private RMAdminProtocolService.BlockingInterface proxy;
+
+ public RMAdminProtocolPBClientImpl(long clientVersion, InetSocketAddress addr,
+ Configuration conf) throws IOException {
+ RPC.setProtocolEngine(conf, RMAdminProtocolService.BlockingInterface.class,
+ ProtoOverHadoopRpcEngine.class);
+ proxy = (RMAdminProtocolService.BlockingInterface)RPC.getProxy(
+ RMAdminProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ }
+
+ @Override
+ public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
+ throws YarnRemoteException {
+ RefreshQueuesRequestProto requestProto =
+ ((RefreshQueuesRequestPBImpl)request).getProto();
+ try {
+ return new RefreshQueuesResponsePBImpl(
+ proxy.refreshQueues(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
+ throws YarnRemoteException {
+ RefreshNodesRequestProto requestProto =
+ ((RefreshNodesRequestPBImpl)request).getProto();
+ try {
+ return new RefreshNodesResponsePBImpl(
+ proxy.refreshNodes(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
+ RefreshSuperUserGroupsConfigurationRequest request)
+ throws YarnRemoteException {
+ RefreshSuperUserGroupsConfigurationRequestProto requestProto =
+ ((RefreshSuperUserGroupsConfigurationRequestPBImpl)request).getProto();
+ try {
+ return new RefreshSuperUserGroupsConfigurationResponsePBImpl(
+ proxy.refreshSuperUserGroupsConfiguration(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
+ RefreshUserToGroupsMappingsRequest request) throws YarnRemoteException {
+ RefreshUserToGroupsMappingsRequestProto requestProto =
+ ((RefreshUserToGroupsMappingsRequestPBImpl)request).getProto();
+ try {
+ return new RefreshUserToGroupsMappingsResponsePBImpl(
+ proxy.refreshUserToGroupsMappings(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+ @Override
+ public RefreshAdminAclsResponse refreshAdminAcls(
+ RefreshAdminAclsRequest request) throws YarnRemoteException {
+ RefreshAdminAclsRequestProto requestProto =
+ ((RefreshAdminAclsRequestPBImpl)request).getProto();
+ try {
+ return new RefreshAdminAclsResponsePBImpl(
+ proxy.refreshAdminAcls(null, requestProto));
+ } catch (ServiceException e) {
+ if (e.getCause() instanceof YarnRemoteException) {
+ throw (YarnRemoteException)e.getCause();
+ } else if (e.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)e.getCause();
+ } else {
+ throw new UndeclaredThrowableException(e);
+ }
+ }
+ }
+
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
new file mode 100644
index 0000000..4d0dde2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
@@ -0,0 +1,122 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.impl.pb.service;
+
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService.BlockingInterface;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*;
+import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
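+// Server-side adapter: converts incoming protos to the record types, invokes
+// the real RMAdminProtocol, and wraps any YarnRemoteException in a
+// ServiceException as required by the generated blocking interface.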
+public class RMAdminProtocolPBServiceImpl implements BlockingInterface {
+
+ private RMAdminProtocol real;
+
+ public RMAdminProtocolPBServiceImpl(RMAdminProtocol impl) {
+ this.real = impl;
+ }
+
+ @Override
+ public RefreshQueuesResponseProto refreshQueues(RpcController controller,
+ RefreshQueuesRequestProto proto) throws ServiceException {
+ RefreshQueuesRequestPBImpl request = new RefreshQueuesRequestPBImpl(proto);
+ try {
+ RefreshQueuesResponse response = real.refreshQueues(request);
+ return ((RefreshQueuesResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RefreshAdminAclsResponseProto refreshAdminAcls(
+ RpcController controller, RefreshAdminAclsRequestProto proto)
+ throws ServiceException {
+ RefreshAdminAclsRequestPBImpl request =
+ new RefreshAdminAclsRequestPBImpl(proto);
+ try {
+ RefreshAdminAclsResponse response = real.refreshAdminAcls(request);
+ return ((RefreshAdminAclsResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RefreshNodesResponseProto refreshNodes(RpcController controller,
+ RefreshNodesRequestProto proto) throws ServiceException {
+ RefreshNodesRequestPBImpl request = new RefreshNodesRequestPBImpl(proto);
+ try {
+ RefreshNodesResponse response = real.refreshNodes(request);
+ return ((RefreshNodesResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RefreshSuperUserGroupsConfigurationResponseProto
+ refreshSuperUserGroupsConfiguration(
+ RpcController controller,
+ RefreshSuperUserGroupsConfigurationRequestProto proto)
+ throws ServiceException {
+ RefreshSuperUserGroupsConfigurationRequestPBImpl request =
+ new RefreshSuperUserGroupsConfigurationRequestPBImpl(proto);
+ try {
+ RefreshSuperUserGroupsConfigurationResponse response =
+ real.refreshSuperUserGroupsConfiguration(request);
+ return ((RefreshSuperUserGroupsConfigurationResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RefreshUserToGroupsMappingsResponseProto refreshUserToGroupsMappings(
+ RpcController controller, RefreshUserToGroupsMappingsRequestProto proto)
+ throws ServiceException {
+ RefreshUserToGroupsMappingsRequestPBImpl request =
+ new RefreshUserToGroupsMappingsRequestPBImpl(proto);
+ try {
+ RefreshUserToGroupsMappingsResponse response =
+ real.refreshUserToGroupsMappings(request);
+ return ((RefreshUserToGroupsMappingsResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsRequest.java
new file mode 100644
index 0000000..135f942
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsRequest.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshAdminAclsRequest {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsResponse.java
new file mode 100644
index 0000000..12cdd03
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsResponse.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshAdminAclsResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesRequest.java
new file mode 100644
index 0000000..c0f86e0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesRequest.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshNodesRequest {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesResponse.java
new file mode 100644
index 0000000..f265439
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesResponse.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshNodesResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesRequest.java
new file mode 100644
index 0000000..5c52536
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesRequest.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshQueuesRequest {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesResponse.java
new file mode 100644
index 0000000..ee3c1e9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesResponse.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshQueuesResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java
new file mode 100644
index 0000000..0779c71
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshSuperUserGroupsConfigurationRequest {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java
new file mode 100644
index 0000000..edbbfdd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshSuperUserGroupsConfigurationResponse {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java
new file mode 100644
index 0000000..cc11a22
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshUserToGroupsMappingsRequest {
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java
new file mode 100644
index 0000000..231bac9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java
@@ -0,0 +1,23 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+
+public interface RefreshUserToGroupsMappingsResponse {
+
+}
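
The Refresh* interfaces above are deliberately empty marker records: every admin operation is expressed as a request/response pair so that fields can be added later without changing method signatures. A minimal sketch of the kind of admin protocol that consumes these records, mirroring the refreshUserToGroupsMappings handler shown earlier; the interface name here is illustrative and not part of this patch:

import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;

// Illustrative sketch only: one method per admin operation, each taking
// and returning a record rather than bare arguments.
public interface AdminProtocolSketch {
  RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
      throws YarnRemoteException;
  RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
      throws YarnRemoteException;
}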
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java
new file mode 100644
index 0000000..c0926aa
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java
@@ -0,0 +1,47 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
+
+public class RefreshAdminAclsRequestPBImpl
+extends ProtoBase<RefreshAdminAclsRequestProto>
+implements RefreshAdminAclsRequest {
+
+ RefreshAdminAclsRequestProto proto = RefreshAdminAclsRequestProto.getDefaultInstance();
+ RefreshAdminAclsRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshAdminAclsRequestPBImpl() {
+ builder = RefreshAdminAclsRequestProto.newBuilder();
+ }
+
+ public RefreshAdminAclsRequestPBImpl(RefreshAdminAclsRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshAdminAclsRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
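
All the PBImpl records in this patch share the lazy-build pattern seen above: an instance wraps either a finished proto (viaProto = true) or a mutable builder, and getProto() freezes the builder exactly once, caching the result for subsequent calls. A round-trip sketch, assuming the classes from this patch are on the classpath:

import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;

public class PBRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // Locally-built record: getProto() turns the builder into a proto.
    RefreshAdminAclsRequestPBImpl request = new RefreshAdminAclsRequestPBImpl();
    byte[] wire = request.getProto().toByteArray();

    // Receiving side: rehydrate the record from the raw protobuf bytes.
    RefreshAdminAclsRequestPBImpl parsed = new RefreshAdminAclsRequestPBImpl(
        RefreshAdminAclsRequestProto.parseFrom(wire));
    System.out.println("round trip ok: " + (parsed.getProto() != null));
  }
}

Since these records carry no fields, the builder exists only to satisfy the shared pattern; the same shape is repeated verbatim in the PBImpl classes that follow.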
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java
new file mode 100644
index 0000000..752d688
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
+
+public class RefreshAdminAclsResponsePBImpl extends ProtoBase<RefreshAdminAclsResponseProto>
+implements RefreshAdminAclsResponse {
+
+ RefreshAdminAclsResponseProto proto = RefreshAdminAclsResponseProto.getDefaultInstance();
+ RefreshAdminAclsResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshAdminAclsResponsePBImpl() {
+ builder = RefreshAdminAclsResponseProto.newBuilder();
+ }
+
+ public RefreshAdminAclsResponsePBImpl(RefreshAdminAclsResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshAdminAclsResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java
new file mode 100644
index 0000000..e1dc4d9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
+
+public class RefreshNodesRequestPBImpl extends ProtoBase<RefreshNodesRequestProto>
+implements RefreshNodesRequest {
+
+ RefreshNodesRequestProto proto = RefreshNodesRequestProto.getDefaultInstance();
+ RefreshNodesRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshNodesRequestPBImpl() {
+ builder = RefreshNodesRequestProto.newBuilder();
+ }
+
+ public RefreshNodesRequestPBImpl(RefreshNodesRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshNodesRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java
new file mode 100644
index 0000000..83978ec
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
+
+public class RefreshNodesResponsePBImpl extends ProtoBase<RefreshNodesResponseProto>
+implements RefreshNodesResponse {
+
+ RefreshNodesResponseProto proto = RefreshNodesResponseProto.getDefaultInstance();
+ RefreshNodesResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshNodesResponsePBImpl() {
+ builder = RefreshNodesResponseProto.newBuilder();
+ }
+
+ public RefreshNodesResponsePBImpl(RefreshNodesResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshNodesResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java
new file mode 100644
index 0000000..b190e54
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
+
+public class RefreshQueuesRequestPBImpl extends ProtoBase<RefreshQueuesRequestProto>
+implements RefreshQueuesRequest {
+
+ RefreshQueuesRequestProto proto = RefreshQueuesRequestProto.getDefaultInstance();
+ RefreshQueuesRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshQueuesRequestPBImpl() {
+ builder = RefreshQueuesRequestProto.newBuilder();
+ }
+
+ public RefreshQueuesRequestPBImpl(RefreshQueuesRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshQueuesRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java
new file mode 100644
index 0000000..c2997ca
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
+
+public class RefreshQueuesResponsePBImpl extends ProtoBase<RefreshQueuesResponseProto>
+implements RefreshQueuesResponse {
+
+ RefreshQueuesResponseProto proto = RefreshQueuesResponseProto.getDefaultInstance();
+ RefreshQueuesResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshQueuesResponsePBImpl() {
+ builder = RefreshQueuesResponseProto.newBuilder();
+ }
+
+ public RefreshQueuesResponsePBImpl(RefreshQueuesResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshQueuesResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java
new file mode 100644
index 0000000..24c33f1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java
@@ -0,0 +1,47 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
+
+public class RefreshSuperUserGroupsConfigurationRequestPBImpl
+extends ProtoBase<RefreshSuperUserGroupsConfigurationRequestProto>
+implements RefreshSuperUserGroupsConfigurationRequest {
+
+ RefreshSuperUserGroupsConfigurationRequestProto proto = RefreshSuperUserGroupsConfigurationRequestProto.getDefaultInstance();
+ RefreshSuperUserGroupsConfigurationRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshSuperUserGroupsConfigurationRequestPBImpl() {
+ builder = RefreshSuperUserGroupsConfigurationRequestProto.newBuilder();
+ }
+
+ public RefreshSuperUserGroupsConfigurationRequestPBImpl(RefreshSuperUserGroupsConfigurationRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshSuperUserGroupsConfigurationRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java
new file mode 100644
index 0000000..5f9194b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+
+public class RefreshSuperUserGroupsConfigurationResponsePBImpl extends ProtoBase<RefreshSuperUserGroupsConfigurationResponseProto>
+implements RefreshSuperUserGroupsConfigurationResponse {
+
+ RefreshSuperUserGroupsConfigurationResponseProto proto = RefreshSuperUserGroupsConfigurationResponseProto.getDefaultInstance();
+ RefreshSuperUserGroupsConfigurationResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshSuperUserGroupsConfigurationResponsePBImpl() {
+ builder = RefreshSuperUserGroupsConfigurationResponseProto.newBuilder();
+ }
+
+ public RefreshSuperUserGroupsConfigurationResponsePBImpl(RefreshSuperUserGroupsConfigurationResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshSuperUserGroupsConfigurationResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java
new file mode 100644
index 0000000..611dc0c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java
@@ -0,0 +1,47 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
+
+public class RefreshUserToGroupsMappingsRequestPBImpl
+extends ProtoBase<RefreshUserToGroupsMappingsRequestProto>
+implements RefreshUserToGroupsMappingsRequest {
+
+ RefreshUserToGroupsMappingsRequestProto proto = RefreshUserToGroupsMappingsRequestProto.getDefaultInstance();
+ RefreshUserToGroupsMappingsRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshUserToGroupsMappingsRequestPBImpl() {
+ builder = RefreshUserToGroupsMappingsRequestProto.newBuilder();
+ }
+
+ public RefreshUserToGroupsMappingsRequestPBImpl(RefreshUserToGroupsMappingsRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshUserToGroupsMappingsRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java
new file mode 100644
index 0000000..8a09e82
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+
+public class RefreshUserToGroupsMappingsResponsePBImpl extends ProtoBase<RefreshUserToGroupsMappingsResponseProto>
+implements RefreshUserToGroupsMappingsResponse {
+
+ RefreshUserToGroupsMappingsResponseProto proto = RefreshUserToGroupsMappingsResponseProto.getDefaultInstance();
+ RefreshUserToGroupsMappingsResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public RefreshUserToGroupsMappingsResponsePBImpl() {
+ builder = RefreshUserToGroupsMappingsResponseProto.newBuilder();
+ }
+
+ public RefreshUserToGroupsMappingsResponsePBImpl(RefreshUserToGroupsMappingsResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public RefreshUserToGroupsMappingsResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ApplicationsStore.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ApplicationsStore.java
new file mode 100644
index 0000000..1160c88
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ApplicationsStore.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.recovery;
+
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+
+public interface ApplicationsStore {
+ public ApplicationStore createApplicationStore(ApplicationId applicationId,
+ ApplicationSubmissionContext context) throws IOException;
+ public void removeApplication(ApplicationId application) throws IOException;
+
+ public interface ApplicationStore {
+ public void storeContainer(Container container) throws IOException;
+ public void removeContainer(Container container) throws IOException;
+ public void storeMasterContainer(Container container) throws IOException;
+ public void updateApplicationState(ApplicationMaster master) throws IOException;
+ public boolean isLoggable();
+ }
+}
\ No newline at end of file
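
ApplicationsStore separates cluster-wide bookkeeping (create/remove per application) from the per-application ApplicationStore, which records containers for recovery. An illustrative call sequence; the helper methods and their arguments are assumptions, not part of the patch:

import java.io.IOException;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;

public class AppStoreUsageSketch {
  public static void persist(ApplicationsStore store, ApplicationId appId,
      ApplicationSubmissionContext context, Container amContainer)
      throws IOException {
    // One ApplicationStore per application, created at submission time.
    ApplicationStore appStore = store.createApplicationStore(appId, context);
    // Record the AM container so it can be recovered after an RM restart.
    appStore.storeMasterContainer(amContainer);
  }

  public static void finish(ApplicationsStore store, ApplicationId appId)
      throws IOException {
    store.removeApplication(appId); // drop state once the app completes
  }
}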
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemStore.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemStore.java
new file mode 100644
index 0000000..85693dd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemStore.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.recovery;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
+public class MemStore implements Store {
+ RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ private NodeId nodeId;
+ private boolean doneWithRecovery = false;
+
+ public MemStore() {
+ nodeId = recordFactory.newRecordInstance(NodeId.class);
+ nodeId.setHost("TODO");
+ nodeId.setPort(-1);
+ }
+
+ @Override
+ public void storeNode(RMNode node) throws IOException {}
+
+ @Override
+ public void removeNode(RMNode node) throws IOException {}
+
+ private class ApplicationStoreImpl implements ApplicationStore {
+ @Override
+ public void storeContainer(Container container) throws IOException {}
+
+ @Override
+ public void removeContainer(Container container) throws IOException {}
+
+ @Override
+ public void storeMasterContainer(Container container) throws IOException {}
+
+ @Override
+ public void updateApplicationState(
+ ApplicationMaster master) throws IOException {}
+
+ @Override
+ public boolean isLoggable() {
+ return doneWithRecovery;
+ }
+
+ }
+
+ @Override
+ public ApplicationStore createApplicationStore(ApplicationId application,
+ ApplicationSubmissionContext context) throws IOException {
+ return new ApplicationStoreImpl();
+ }
+
+
+ @Override
+ public void removeApplication(ApplicationId application) throws IOException {}
+
+ @Override
+ public RMState restore() throws IOException {
+ MemRMState state = new MemRMState();
+ return state;
+ }
+
+ @Override
+ public synchronized NodeId getNextNodeId() throws IOException {
+ // TODO: FIXMEVinodkv
+// int num = nodeId.getId();
+// num++;
+// nodeId.setId(num);
+ return nodeId;
+ }
+
+ private class MemRMState implements RMState {
+
+ public MemRMState() {
+ nodeId = recordFactory.newRecordInstance(NodeId.class);
+ }
+
+ @Override
+ public List<RMNode> getStoredNodeManagers() {
+ return new ArrayList<RMNode>();
+ }
+
+ @Override
+ public NodeId getLastLoggedNodeId() {
+ return nodeId;
+ }
+
+ @Override
+ public Map<ApplicationId, ApplicationInfo> getStoredApplications() {
+ return new HashMap<ApplicationId, Store.ApplicationInfo>();
+ }
+ }
+
+ @Override
+ public boolean isLoggable() {
+ return doneWithRecovery;
+ }
+
+ @Override
+ public void doneWithRecovery() {
+ doneWithRecovery = true;
+ }
+}
\ No newline at end of file
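
MemStore is the in-memory default: all store/remove operations are no-ops and restore() returns empty collections, which makes it the natural choice when recovery is disabled and in tests. A quick sketch of its behavior:

import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;

public class MemStoreSketch {
  public static void main(String[] args) throws Exception {
    MemStore store = new MemStore();

    // Nothing is ever persisted, so restored state is always empty.
    RMState state = store.restore();
    System.out.println("nodes: " + state.getStoredNodeManagers().size());
    System.out.println("apps:  " + state.getStoredApplications().size());

    // Recovery is trivially complete; isLoggable() now reports true.
    store.doneWithRecovery();
    System.out.println("loggable: " + store.isLoggable());
  }
}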
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NodeStore.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NodeStore.java
new file mode 100644
index 0000000..cc11eec
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NodeStore.java
@@ -0,0 +1,32 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.recovery;
+
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
+
+public interface NodeStore {
+ public void storeNode(RMNode node) throws IOException;
+ public void removeNode(RMNode node) throws IOException;
+ public NodeId getNextNodeId() throws IOException;
+ public boolean isLoggable();
+}
\ No newline at end of file
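
NodeStore is the node half of the contract: allocate a stable id, persist the node for recovery, and drop it on decommission. A hedged usage sketch, with the RMNode assumed to be supplied by the caller:

import java.io.IOException;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.NodeStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;

public class NodeStoreUsageSketch {
  public static NodeId register(NodeStore store, RMNode node)
      throws IOException {
    NodeId next = store.getNextNodeId(); // reserve the next stable node id
    store.storeNode(node);               // persist the node for recovery
    return next;
  }

  public static void decommission(NodeStore store, RMNode node)
      throws IOException {
    store.removeNode(node);              // forget the node once it leaves
  }
}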
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/Recoverable.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/Recoverable.java
new file mode 100644
index 0000000..2dccf59
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/Recoverable.java
@@ -0,0 +1,24 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.resourcemanager.recovery;
+
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
+
+public interface Recoverable {
+ public void recover(RMState state) throws Exception;
+}
\ No newline at end of file
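
Recoverable is implemented by any RM component that can rebuild itself from persisted state after a restart. An illustrative implementation that replays the stored node list:

import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;

// Illustrative only: re-admit every node recorded before the restart.
public class NodeRecoverySketch implements Recoverable {
  @Override
  public void recover(RMState state) throws Exception {
    for (RMNode node : state.getStoredNodeManagers()) {
      System.out.println("re-admitting node " + node.getNodeID());
    }
  }
}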
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/Store.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/Store.java
new file mode 100644
index 0000000..b784c71
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/Store.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.resourcemanager.recovery;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
+
+public interface Store extends NodeStore, ApplicationsStore {
+ public interface ApplicationInfo {
+ public ApplicationMaster getApplicationMaster();
+ public Container getMasterContainer();
+ public ApplicationSubmissionContext getApplicationSubmissionContext();
+ public List<Container> getContainers();
+ }
+ public interface RMState {
+    public List<RMNode> getStoredNodeManagers();
+ public Map<ApplicationId, ApplicationInfo> getStoredApplications();
+ public NodeId getLastLoggedNodeId();
+ }
+ public RMState restore() throws IOException;
+ public void doneWithRecovery();
+}
\ No newline at end of file
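
Store composes the two stores and adds the recovery lifecycle. The implied ordering is: restore() first, then hand the RMState to each Recoverable, then doneWithRecovery() to flip the store from replay into normal logging mode. A sketch of that driver, with the components assumed to be supplied by the caller:

import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;

public class RecoveryDriverSketch {
  public static void recoverAll(Store store, Iterable<Recoverable> components)
      throws Exception {
    RMState state = store.restore();  // 1. read back the persisted state
    for (Recoverable component : components) {
      component.recover(state);       // 2. let each component replay it
    }
    store.doneWithRecovery();         // 3. resume normal logging
  }
}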
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/StoreFactory.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/StoreFactory.java
new file mode 100644
index 0000000..edc5d53
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/StoreFactory.java
@@ -0,0 +1,69 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.resourcemanager.recovery;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+
+public class StoreFactory {
+
+ public static Store getStore(Configuration conf) {
+ Store store = ReflectionUtils.newInstance(
+ conf.getClass(RMConfig.RM_STORE,
+ MemStore.class, Store.class),
+ conf);
+ return store;
+ }
+
+ public static ApplicationStore createVoidAppStore() {
+ return new VoidApplicationStore();
+ }
+
+ private static class VoidApplicationStore implements ApplicationStore {
+
+ public VoidApplicationStore() {}
+
+ @Override
+ public void storeContainer(Container container) throws IOException {
+ }
+
+ @Override
+ public void removeContainer(Container container) throws IOException {
+ }
+
+ @Override
+ public void storeMasterContainer(Container container) throws IOException {
+ }
+
+ @Override
+ public void updateApplicationState(ApplicationMaster master)
+ throws IOException {
+ }
+
+ @Override
+ public boolean isLoggable() {
+ return false;
+ }
+ }
+}
\ No newline at end of file
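
StoreFactory reflects the store class out of the configuration, defaulting to MemStore. Selecting the ZooKeeper-backed store is then a one-line configuration change, as sketched below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKStore;

public class StoreSelectionSketch {
  public static Store select() {
    Configuration conf = new Configuration();
    // Default is MemStore; point RMConfig.RM_STORE at ZKStore to persist
    // RM state to ZooKeeper (the ZK address/timeout must also be set).
    conf.setClass(RMConfig.RM_STORE, ZKStore.class, Store.class);
    return StoreFactory.getStore(conf);
  }
}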
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKStore.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKStore.java
new file mode 100644
index 0000000..baaca6b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKStore.java
@@ -0,0 +1,508 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.recovery;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationMasterPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationMasterProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.Stat;
+
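+/**
+ * A {@link Store} implementation that persists ResourceManager state in
+ * ZooKeeper: node state under the "nodes/" znode, and per-application state
+ * (submission context, application master, containers) under "apps/".
+ * Writes are no-ops until {@link #doneWithRecovery()} is called.
+ */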
+public class ZKStore implements Store {
+ private final Configuration conf;
+ private final ZooKeeper zkClient;
+ private static final Log LOG = LogFactory.getLog(ZKStore.class);
+ private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ private static final String NODES = "nodes/";
+ private static final String APPS = "apps/";
+ private static final String ZK_PATH_SEPARATOR = "/";
+ private static final String NODE_ID = "nodeid";
+ private static final String APP_MASTER = "master";
+ private static final String APP_MASTER_CONTAINER = "mastercontainer";
+ private final String ZK_ADDRESS;
+ private final int ZK_TIMEOUT;
+ private boolean doneWithRecovery = false;
+
+ /** TODO make this generic **/
+ private NodeIdPBImpl nodeId = new NodeIdPBImpl();
+
+  /**
+   * TODO fix this later to handle all kinds of
+   * connection and session events.
+   */
+ private static class ZKWatcher implements Watcher {
+ @Override
+ public void process(WatchedEvent arg0) {
+ }
+ }
+
+ public ZKStore(Configuration conf) throws IOException {
+ this.conf = conf;
+ this.ZK_ADDRESS = conf.get(RMConfig.ZK_ADDRESS);
+ this.ZK_TIMEOUT = conf.getInt(RMConfig.ZK_SESSION_TIMEOUT,
+ RMConfig.DEFAULT_ZK_TIMEOUT);
+ zkClient = new ZooKeeper(this.ZK_ADDRESS,
+ this.ZK_TIMEOUT,
+ createZKWatcher()
+ );
+ // TODO: FIXMEVinodkv
+// this.nodeId.setId(0);
+ }
+
+ protected Watcher createZKWatcher() {
+ return new ZKWatcher();
+ }
+
+ private NodeReportPBImpl createNodeManagerInfo(RMNode rmNode) {
+ NodeReport node =
+ recordFactory.newRecordInstance(NodeReport.class);
+ node.setNodeId(rmNode.getNodeID());
+ node.setRackName(rmNode.getRackName());
+ node.setCapability(rmNode.getTotalCapability());
+ // TODO: FIXME
+// node.setUsed(nodeInfo.getUsedResource());
+ // TODO: acm: refactor2 FIXME
+// node.setNumContainers(rmNode.getNumContainers());
+ return (NodeReportPBImpl)node;
+ }
+
+ @Override
+ public synchronized void storeNode(RMNode node) throws IOException {
+ /** create a storage node and store it in zk **/
+ if (!doneWithRecovery) return;
+ NodeReportPBImpl nodeManagerInfo = createNodeManagerInfo(node);
+    // TODO FindBugs - will be fixed after the subsequent fixme
+ byte[] bytes = nodeManagerInfo.getProto().toByteArray();
+ // TODO: FIXMEVinodkv
+// try {
+// zkClient.create(NODES + Integer.toString(node.getNodeID().getId()), bytes, null,
+// CreateMode.PERSISTENT);
+// } catch(InterruptedException ie) {
+// LOG.info("Interrupted", ie);
+// throw new InterruptedIOException("Interrupted");
+// } catch(KeeperException ke) {
+// LOG.info("Keeper exception", ke);
+// throw convertToIOException(ke);
+// }
+ }
+
+ @Override
+ public synchronized void removeNode(RMNode node) throws IOException {
+ if (!doneWithRecovery) return;
+
+// TODO: FIXME VINODKV
+// /** remove a storage node **/
+// try {
+// zkClient.delete(NODES + Integer.toString(node.getNodeID().getId()), -1);
+// } catch(InterruptedException ie) {
+// LOG.info("Interrupted", ie);
+// throw new InterruptedIOException("Interrupted");
+// } catch(KeeperException ke) {
+// LOG.info("Keeper exception", ke);
+// throw convertToIOException(ke);
+// }
+
+ }
+
+  private static IOException convertToIOException(KeeperException ke) {
+    // Keep the original message and stack trace so the cause is not lost.
+    IOException io = new IOException(ke.getMessage());
+    io.setStackTrace(ke.getStackTrace());
+    return io;
+  }
+
+ @Override
+ public synchronized NodeId getNextNodeId() throws IOException {
+// TODO: FIXME VINODKV
+// int num = nodeId.getId();
+// num++;
+// nodeId.setId(num);
+// try {
+// zkClient.setData(NODES + NODE_ID, nodeId.getProto().toByteArray() , -1);
+// } catch(InterruptedException ie) {
+// LOG.info("Interrupted", ie);
+// throw new InterruptedIOException(ie.getMessage());
+// } catch(KeeperException ke) {
+// throw convertToIOException(ke);
+// }
+ return nodeId;
+ }
+
+  private String containerPathFromContainerId(ContainerId containerId) {
+    String appString = ConverterUtils.toString(containerId.getAppId());
+    return appString + ZK_PATH_SEPARATOR + containerId.getId();
+  }
+
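+  /**
+   * Per-application store that keeps the application master, its container
+   * and the running containers under the application's "apps/" znode.
+   */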
+ private class ZKApplicationStore implements ApplicationStore {
+ private final ApplicationId applicationId;
+
+ public ZKApplicationStore(ApplicationId applicationId) {
+ this.applicationId = applicationId;
+ }
+
+ @Override
+ public void storeMasterContainer(Container container) throws IOException {
+ if (!doneWithRecovery) return;
+
+ ContainerPBImpl containerPBImpl = (ContainerPBImpl) container;
+ try {
+ zkClient.setData(APPS + ConverterUtils.toString(container.getId().getAppId()) +
+ ZK_PATH_SEPARATOR + APP_MASTER_CONTAINER
+ , containerPBImpl.getProto().toByteArray(), -1);
+ } catch(InterruptedException ie) {
+ LOG.info("Interrupted", ie);
+ throw new InterruptedIOException(ie.getMessage());
+ } catch(KeeperException ke) {
+ LOG.info("Keeper exception", ke);
+ throw convertToIOException(ke);
+ }
+ }
+
+    @Override
+ public synchronized void storeContainer(Container container) throws IOException {
+ if (!doneWithRecovery) return;
+
+ ContainerPBImpl containerPBImpl = (ContainerPBImpl) container;
+ try {
+        // A null ACL list is rejected by ZooKeeper; use the open ACL.
+        zkClient.create(APPS + containerPathFromContainerId(container.getId()),
+            containerPBImpl.getProto().toByteArray(), Ids.OPEN_ACL_UNSAFE,
+            CreateMode.PERSISTENT);
+ } catch(InterruptedException ie) {
+ LOG.info("Interrupted", ie);
+ throw new InterruptedIOException(ie.getMessage());
+ } catch(KeeperException ke) {
+ LOG.info("Keeper exception", ke);
+ throw convertToIOException(ke);
+ }
+ }
+
+ @Override
+ public synchronized void removeContainer(Container container) throws IOException {
+ if (!doneWithRecovery) return;
+ try {
+ zkClient.delete(APPS + containerPathFromContainerId(container.getId()),
+ -1);
+      } catch(InterruptedException ie) {
+        LOG.info("Interrupted", ie);
+        throw new InterruptedIOException(ie.getMessage());
+ } catch(KeeperException ke) {
+ LOG.info("Keeper exception", ke);
+ throw convertToIOException(ke);
+ }
+ }
+
+ @Override
+ public void updateApplicationState(
+ ApplicationMaster master) throws IOException {
+ if (!doneWithRecovery) return;
+
+ String appString = APPS + ConverterUtils.toString(applicationId);
+ ApplicationMasterPBImpl masterPBImpl = (ApplicationMasterPBImpl) master;
+ try {
+ zkClient.setData(appString, masterPBImpl.getProto().toByteArray(), -1);
+ } catch(InterruptedException ie) {
+ LOG.info("Interrupted", ie);
+ throw new InterruptedIOException(ie.getMessage());
+ } catch(KeeperException ke) {
+ LOG.info("Keeper exception", ke);
+ throw convertToIOException(ke);
+ }
+ }
+
+ @Override
+ public boolean isLoggable() {
+ return doneWithRecovery;
+ }
+ }
+
+ @Override
+ public synchronized ApplicationStore createApplicationStore(ApplicationId application,
+ ApplicationSubmissionContext context) throws IOException {
+ if (!doneWithRecovery) return new ZKApplicationStore(application);
+
+ ApplicationSubmissionContextPBImpl contextPBImpl = (ApplicationSubmissionContextPBImpl) context;
+ String appString = APPS + ConverterUtils.toString(application);
+
+ ApplicationMasterPBImpl masterPBImpl = new ApplicationMasterPBImpl();
+ ContainerPBImpl container = new ContainerPBImpl();
+ try {
+      zkClient.create(appString, contextPBImpl.getProto().toByteArray(),
+          Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+      zkClient.create(appString + ZK_PATH_SEPARATOR + APP_MASTER,
+          masterPBImpl.getProto().toByteArray(), Ids.OPEN_ACL_UNSAFE,
+          CreateMode.PERSISTENT);
+      zkClient.create(appString + ZK_PATH_SEPARATOR + APP_MASTER_CONTAINER,
+          container.getProto().toByteArray(), Ids.OPEN_ACL_UNSAFE,
+          CreateMode.PERSISTENT);
+ } catch(InterruptedException ie) {
+ LOG.info("Interrupted", ie);
+ throw new InterruptedIOException(ie.getMessage());
+ } catch(KeeperException ke) {
+ LOG.info("Keeper exception", ke);
+ throw convertToIOException(ke);
+ }
+ return new ZKApplicationStore(application);
+ }
+
+ @Override
+ public synchronized void removeApplication(ApplicationId application) throws IOException {
+ if (!doneWithRecovery) return;
+
+ try {
+ zkClient.delete(APPS + ConverterUtils.toString(application), -1);
+ } catch(InterruptedException ie) {
+ LOG.info("Interrupted", ie);
+ throw new InterruptedIOException(ie.getMessage());
+ } catch(KeeperException ke) {
+ LOG.info("Keeper Exception", ke);
+ throw convertToIOException(ke);
+ }
+ }
+
+ @Override
+ public boolean isLoggable() {
+ return doneWithRecovery;
+ }
+
+ @Override
+ public void doneWithRecovery() {
+ this.doneWithRecovery = true;
+ }
+
+
+ @Override
+ public synchronized RMState restore() throws IOException {
+ ZKRMState rmState = new ZKRMState();
+ rmState.load();
+ return rmState;
+ }
+
+ private static class ApplicationInfoImpl implements ApplicationInfo {
+ private ApplicationMaster master;
+ private Container masterContainer;
+
+ private final ApplicationSubmissionContext context;
+ private final List<Container> containers = new ArrayList<Container>();
+
+ public ApplicationInfoImpl(ApplicationSubmissionContext context) {
+ this.context = context;
+ }
+
+ public void setApplicationMaster(ApplicationMaster master) {
+ this.master = master;
+ }
+
+ public void setMasterContainer(Container container) {
+ this.masterContainer = container;
+ }
+
+ @Override
+ public ApplicationMaster getApplicationMaster() {
+ return this.master;
+ }
+
+ @Override
+ public ApplicationSubmissionContext getApplicationSubmissionContext() {
+ return this.context;
+ }
+
+ @Override
+ public Container getMasterContainer() {
+ return this.masterContainer;
+ }
+
+ @Override
+ public List<Container> getContainers() {
+ return this.containers;
+ }
+
+ public void addContainer(Container container) {
+ containers.add(container);
+ }
+ }
+
+ private class ZKRMState implements RMState {
+ private List<RMNode> nodeManagers = new ArrayList<RMNode>();
+ private Map<ApplicationId, ApplicationInfo> applications = new
+ HashMap<ApplicationId, ApplicationInfo>();
+
+ public ZKRMState() {
+ LOG.info("Restoring RM state from ZK");
+ }
+
+ private synchronized List<NodeReport> listStoredNodes() throws IOException {
+ /** get the list of nodes stored in zk **/
+ //TODO PB
+ List<NodeReport> nodes = new ArrayList<NodeReport>();
+ Stat stat = new Stat();
+ try {
+ List<String> children = zkClient.getChildren(NODES, false);
+ for (String child: children) {
+ byte[] data = zkClient.getData(NODES + child, false, stat);
+ NodeReportPBImpl nmImpl = new NodeReportPBImpl(
+ NodeReportProto.parseFrom(data));
+ nodes.add(nmImpl);
+ }
+ } catch (InterruptedException ie) {
+ LOG.info("Interrupted" , ie);
+ throw new InterruptedIOException("Interrupted");
+ } catch(KeeperException ke) {
+ LOG.error("Failed to list nodes", ke);
+ throw convertToIOException(ke);
+ }
+ return nodes;
+ }
+
+ @Override
+ public List<RMNode> getStoredNodeManagers() {
+ return nodeManagers;
+ }
+
+ @Override
+ public NodeId getLastLoggedNodeId() {
+ return nodeId;
+ }
+
+ private void readLastNodeId() throws IOException {
+ Stat stat = new Stat();
+ try {
+ byte[] data = zkClient.getData(NODES + NODE_ID, false, stat);
+ nodeId = new NodeIdPBImpl(NodeIdProto.parseFrom(data));
+ } catch(InterruptedException ie) {
+ LOG.info("Interrupted", ie);
+ throw new InterruptedIOException(ie.getMessage());
+ } catch(KeeperException ke) {
+ LOG.info("Keeper Exception", ke);
+ throw convertToIOException(ke);
+ }
+ }
+
+ private ApplicationInfo getAppInfo(String app) throws IOException {
+ ApplicationInfoImpl info = null;
+ Stat stat = new Stat();
+ try {
+ ApplicationSubmissionContext context = null;
+ byte[] data = zkClient.getData(APPS + app, false, stat);
+ context = new ApplicationSubmissionContextPBImpl(
+ ApplicationSubmissionContextProto.parseFrom(data));
+ info = new ApplicationInfoImpl(context);
+ List<String> children = zkClient.getChildren(APPS + app, false, stat);
+ ApplicationMaster master = null;
+ for (String child: children) {
+ byte[] childdata = zkClient.getData(APPS + app + ZK_PATH_SEPARATOR + child, false, stat);
+ if (APP_MASTER.equals(child)) {
+ master = new ApplicationMasterPBImpl(ApplicationMasterProto.parseFrom(childdata));
+ info.setApplicationMaster(master);
+          } else if (APP_MASTER_CONTAINER.equals(child)) {
+            Container masterContainer = new ContainerPBImpl(ContainerProto.parseFrom(childdata));
+            info.setMasterContainer(masterContainer);
+          } else {
+            Container container = new ContainerPBImpl(ContainerProto.parseFrom(childdata));
+            info.addContainer(container);
+          }
+ }
+ } catch(InterruptedException ie) {
+ LOG.info("Interrupted", ie);
+ throw new InterruptedIOException(ie.getMessage());
+ } catch(KeeperException ke) {
+ throw convertToIOException(ke);
+ }
+ return info;
+ }
+
+ private void load() throws IOException {
+ List<NodeReport> nodeInfos = listStoredNodes();
+      final Pattern addressPattern = Pattern.compile("(.*):(.*)");
+      final Matcher m = addressPattern.matcher("");
+      for (NodeReport node: nodeInfos) {
+        m.reset(node.getNodeId().getHost());
+        if (!m.find()) {
+          LOG.info("Skipping node, bad node-address "
+              + node.getNodeId().getHost());
+          continue;
+        }
+        String hostName = m.group(1);
+        int cmPort = Integer.valueOf(m.group(2));
+        m.reset(node.getHttpAddress());
+        if (!m.find()) {
+          LOG.info("Skipping node, bad http-address " + node.getHttpAddress());
+          continue;
+        }
+        int httpPort = Integer.valueOf(m.group(2));
+ // TODO: FindBugs Valid. Fix
+ RMNode nm = new RMNodeImpl(node.getNodeId(), null,
+ hostName, cmPort, httpPort,
+ ResourceTrackerService.resolve(node.getNodeId().getHost()),
+ node.getCapability());
+ nodeManagers.add(nm);
+ }
+ readLastNodeId();
+ /* make sure we get all the applications */
+ List<String> apps = null;
+ try {
+ apps = zkClient.getChildren(APPS, false);
+ } catch(InterruptedException ie) {
+ LOG.info("Interrupted", ie);
+ throw new InterruptedIOException(ie.getMessage());
+ } catch(KeeperException ke) {
+ throw convertToIOException(ke);
+ }
+ for (String app: apps) {
+ ApplicationInfo info = getAppInfo(app);
+ applications.put(info.getApplicationMaster().getApplicationId(), info);
+ }
+ }
+
+ @Override
+ public Map<ApplicationId, ApplicationInfo> getStoredApplications() {
+ return applications;
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Priority.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Priority.java
new file mode 100644
index 0000000..5060c4c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Priority.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.resource;
+
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
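+/**
+ * Helper for creating Priority records. The nested Comparator orders
+ * priorities by ascending numeric value.
+ */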
+public class Priority {
+
+  public static org.apache.hadoop.yarn.api.records.Priority create(int prio) {
+    org.apache.hadoop.yarn.api.records.Priority priority =
+        RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
+            org.apache.hadoop.yarn.api.records.Priority.class);
+    priority.setPriority(prio);
+    return priority;
+  }
+
+ public static class Comparator
+ implements java.util.Comparator<org.apache.hadoop.yarn.api.records.Priority> {
+    @Override
+    public int compare(org.apache.hadoop.yarn.api.records.Priority o1,
+        org.apache.hadoop.yarn.api.records.Priority o2) {
+      return o1.getPriority() - o2.getPriority();
+    }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resource.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resource.java
new file mode 100644
index 0000000..14cd1db
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resource.java
@@ -0,0 +1,73 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.resource;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
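+/**
+ * Static helpers to create, mutate and compare memory-based Resource
+ * records.
+ */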
+@Private
+@Evolving
+public class Resource {
+ public static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ public static final org.apache.hadoop.yarn.api.records.Resource NONE = createResource(0);
+
+  public static org.apache.hadoop.yarn.api.records.Resource createResource(int memory) {
+    org.apache.hadoop.yarn.api.records.Resource resource =
+        recordFactory.newRecordInstance(
+            org.apache.hadoop.yarn.api.records.Resource.class);
+    resource.setMemory(memory);
+    return resource;
+  }
+
+ public static void addResource(org.apache.hadoop.yarn.api.records.Resource lhs,
+ org.apache.hadoop.yarn.api.records.Resource rhs) {
+ lhs.setMemory(lhs.getMemory() + rhs.getMemory());
+ }
+
+ public static void subtractResource(org.apache.hadoop.yarn.api.records.Resource lhs,
+ org.apache.hadoop.yarn.api.records.Resource rhs) {
+ lhs.setMemory(lhs.getMemory() - rhs.getMemory());
+ }
+
+ public static boolean equals(org.apache.hadoop.yarn.api.records.Resource lhs,
+ org.apache.hadoop.yarn.api.records.Resource rhs) {
+ return lhs.getMemory() == rhs.getMemory();
+ }
+
+ public static boolean lessThan(org.apache.hadoop.yarn.api.records.Resource lhs,
+ org.apache.hadoop.yarn.api.records.Resource rhs) {
+ return lhs.getMemory() < rhs.getMemory();
+ }
+
+ public static boolean lessThanOrEqual(org.apache.hadoop.yarn.api.records.Resource lhs,
+ org.apache.hadoop.yarn.api.records.Resource rhs) {
+ return lhs.getMemory() <= rhs.getMemory();
+ }
+
+ public static boolean greaterThan(org.apache.hadoop.yarn.api.records.Resource lhs,
+ org.apache.hadoop.yarn.api.records.Resource rhs) {
+ return lhs.getMemory() > rhs.getMemory();
+ }
+
+ public static boolean greaterThanOrEqual(org.apache.hadoop.yarn.api.records.Resource lhs,
+ org.apache.hadoop.yarn.api.records.Resource rhs) {
+ return lhs.getMemory() >= rhs.getMemory();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resources.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resources.java
new file mode 100644
index 0000000..e2b062c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resources.java
@@ -0,0 +1,97 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.resource;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Records;
+
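+/**
+ * Static helpers for memory-based Resource arithmetic and comparison.
+ * The "To" variants (addTo, subtractFrom, multiplyTo) mutate their first
+ * argument; the other operations return a new Resource.
+ */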
+@Private
+@Evolving
+public class Resources {
+ // Java doesn't have const :(
+ private static final Resource NONE = createResource(0);
+
+ public static Resource createResource(int memory) {
+ Resource resource = Records.newRecord(Resource.class);
+ resource.setMemory(memory);
+ return resource;
+ }
+
+ public static Resource none() {
+ assert NONE.getMemory() == 0 : "NONE should be empty";
+ return NONE;
+ }
+
+ public static Resource clone(Resource res) {
+ return createResource(res.getMemory());
+ }
+
+ public static Resource addTo(Resource lhs, Resource rhs) {
+ lhs.setMemory(lhs.getMemory() + rhs.getMemory());
+ return lhs;
+ }
+
+ public static Resource add(Resource lhs, Resource rhs) {
+ return addTo(clone(lhs), rhs);
+ }
+
+ public static Resource subtractFrom(Resource lhs, Resource rhs) {
+ lhs.setMemory(lhs.getMemory() - rhs.getMemory());
+ return lhs;
+ }
+
+ public static Resource subtract(Resource lhs, Resource rhs) {
+ return subtractFrom(clone(lhs), rhs);
+ }
+
+ public static Resource negate(Resource resource) {
+ return subtract(NONE, resource);
+ }
+
+ public static Resource multiplyTo(Resource lhs, int by) {
+ lhs.setMemory(lhs.getMemory() * by);
+ return lhs;
+ }
+
+ public static Resource multiply(Resource lhs, int by) {
+ return multiplyTo(clone(lhs), by);
+ }
+
+ public static boolean equals(Resource lhs, Resource rhs) {
+ return lhs.getMemory() == rhs.getMemory();
+ }
+
+ public static boolean lessThan(Resource lhs, Resource rhs) {
+ return lhs.getMemory() < rhs.getMemory();
+ }
+
+ public static boolean lessThanOrEqual(Resource lhs, Resource rhs) {
+ return lhs.getMemory() <= rhs.getMemory();
+ }
+
+ public static boolean greaterThan(Resource lhs, Resource rhs) {
+ return lhs.getMemory() > rhs.getMemory();
+ }
+
+ public static boolean greaterThanOrEqual(Resource lhs, Resource rhs) {
+ return lhs.getMemory() >= rhs.getMemory();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
new file mode 100644
index 0000000..1ea6d83
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -0,0 +1,39 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+
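+/**
+ * The interface to an application in the ResourceManager. Implementations
+ * consume {@link RMAppEvent}s and track the application's attempts.
+ */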
+public interface RMApp extends EventHandler<RMAppEvent> {
+
+ ApplicationId getApplicationId();
+
+ RMAppState getState();
+
+ String getUser();
+
+ float getProgress();
+
+ RMAppAttempt getRMAppAttempt(ApplicationAttemptId appAttemptId);
+
+ String getQueue();
+
+ String getName();
+
+ RMAppAttempt getCurrentAppAttempt();
+
+ ApplicationReport createAndGetApplicationReport();
+
+ ApplicationStore getApplicationStore();
+
+ long getFinishTime();
+
+ long getStartTime();
+
+ String getTrackingUrl();
+
+ StringBuilder getDiagnostics();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
new file mode 100644
index 0000000..c648ae9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class RMAppEvent extends AbstractEvent<RMAppEventType> {
+
+ private final ApplicationId appId;
+
+ public RMAppEvent(ApplicationId appId, RMAppEventType type) {
+ super(type);
+ this.appId = appId;
+ }
+
+ public ApplicationId getApplicationId() {
+ return this.appId;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
new file mode 100644
index 0000000..54bffa9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
@@ -0,0 +1,15 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+public enum RMAppEventType {
+ // Source: ClientRMService
+ START,
+ KILL,
+
+ // Source: RMAppAttempt
+ APP_REJECTED,
+ APP_ACCEPTED,
+ ATTEMPT_REGISTERED,
+ ATTEMPT_FINISHED, // Will send the final state
+ ATTEMPT_FAILED,
+ ATTEMPT_KILLED
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
new file mode 100644
index 0000000..cdab96b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -0,0 +1,473 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+
+public class RMAppImpl implements RMApp {
+
+ private static final Log LOG = LogFactory.getLog(RMAppImpl.class);
+
+ // Immutable fields
+ private final ApplicationId applicationId;
+ private final RMContext rmContext;
+ private final Configuration conf;
+ private final String user;
+ private final String queue;
+ private final String name;
+ private final ApplicationSubmissionContext submissionContext;
+ private final String clientTokenStr;
+ private final ApplicationStore appStore;
+ private final Dispatcher dispatcher;
+ private final YarnScheduler scheduler;
+ private final ApplicationMasterService masterService;
+ private final StringBuilder diagnostics = new StringBuilder();
+ private final int maxRetries;
+ private final ReadLock readLock;
+ private final WriteLock writeLock;
+ private final Map<ApplicationAttemptId, RMAppAttempt> attempts
+ = new LinkedHashMap<ApplicationAttemptId, RMAppAttempt>();
+
+ // Mutable fields
+ private long startTime;
+ private long finishTime;
+ private AMLivelinessMonitor amLivelinessMonitor;
+ private RMAppAttempt currentAttempt;
+
+ private static final FinalTransition FINAL_TRANSITION = new FinalTransition();
+
+ private static final StateMachineFactory<RMAppImpl,
+ RMAppState,
+ RMAppEventType,
+ RMAppEvent> stateMachineFactory
+ = new StateMachineFactory<RMAppImpl,
+ RMAppState,
+ RMAppEventType,
+ RMAppEvent>(RMAppState.NEW)
+
+
+    // TODO - ATTEMPT_KILLED is not sent right now, but handle it if the
+    // attempt starts sending it
+
+ // Transitions from NEW state
+ .addTransition(RMAppState.NEW, RMAppState.SUBMITTED,
+ RMAppEventType.START, new StartAppAttemptTransition())
+ .addTransition(RMAppState.NEW, RMAppState.KILLED, RMAppEventType.KILL,
+ new AppKilledTransition())
+
+ // Transitions from SUBMITTED state
+ .addTransition(RMAppState.SUBMITTED, RMAppState.FAILED,
+ RMAppEventType.APP_REJECTED, new AppRejectedTransition())
+ .addTransition(RMAppState.SUBMITTED, RMAppState.ACCEPTED,
+ RMAppEventType.APP_ACCEPTED)
+ .addTransition(RMAppState.SUBMITTED, RMAppState.KILLED,
+ RMAppEventType.KILL, new AppKilledTransition())
+
+ // Transitions from ACCEPTED state
+ .addTransition(RMAppState.ACCEPTED, RMAppState.RUNNING,
+ RMAppEventType.ATTEMPT_REGISTERED)
+ .addTransition(RMAppState.ACCEPTED,
+ EnumSet.of(RMAppState.ACCEPTED, RMAppState.FAILED),
+ RMAppEventType.ATTEMPT_FAILED,
+ new AttemptFailedTransition(RMAppState.ACCEPTED))
+ .addTransition(RMAppState.ACCEPTED, RMAppState.KILLED,
+ RMAppEventType.KILL, new AppKilledTransition())
+
+ // Transitions from RUNNING state
+ .addTransition(RMAppState.RUNNING, RMAppState.FINISHED,
+ RMAppEventType.ATTEMPT_FINISHED, FINAL_TRANSITION)
+ .addTransition(RMAppState.RUNNING,
+ EnumSet.of(RMAppState.RUNNING, RMAppState.FAILED),
+ RMAppEventType.ATTEMPT_FAILED,
+ new AttemptFailedTransition(RMAppState.RUNNING))
+ .addTransition(RMAppState.RUNNING, RMAppState.KILLED,
+ RMAppEventType.KILL, new AppKilledTransition())
+
+ // Transitions from RESTARTING state
+ // TODO - no way to get to RESTARTING state right now
+ .addTransition(RMAppState.RESTARTING, RMAppState.RUNNING,
+ RMAppEventType.ATTEMPT_REGISTERED)
+ .addTransition(RMAppState.RESTARTING,
+ EnumSet.of(RMAppState.RESTARTING, RMAppState.FAILED),
+ RMAppEventType.ATTEMPT_FAILED,
+ new AttemptFailedTransition(RMAppState.RESTARTING))
+ .addTransition(RMAppState.RESTARTING, RMAppState.KILLED,
+ RMAppEventType.KILL, new AppKilledTransition())
+
+ // Transitions from FINISHED state
+ .addTransition(RMAppState.FINISHED, RMAppState.FINISHED,
+ RMAppEventType.KILL)
+
+ // Transitions from FAILED state
+ .addTransition(RMAppState.FAILED, RMAppState.FAILED,
+ RMAppEventType.KILL)
+
+ // Transitions from KILLED state
+ .addTransition(
+ RMAppState.KILLED,
+ RMAppState.KILLED,
+ EnumSet.of(RMAppEventType.KILL, RMAppEventType.ATTEMPT_FINISHED,
+ RMAppEventType.ATTEMPT_FAILED, RMAppEventType.ATTEMPT_KILLED))
+
+ .installTopology();
+
+ private final StateMachine<RMAppState, RMAppEventType, RMAppEvent>
+ stateMachine;
+
+ public RMAppImpl(ApplicationId applicationId, RMContext rmContext,
+ Configuration config, String name, String user, String queue,
+ ApplicationSubmissionContext submissionContext, String clientTokenStr,
+ ApplicationStore appStore, AMLivelinessMonitor amLivelinessMonitor,
+ YarnScheduler scheduler, ApplicationMasterService masterService) {
+
+ this.applicationId = applicationId;
+ this.name = name;
+ this.rmContext = rmContext;
+ this.dispatcher = rmContext.getDispatcher();
+ this.conf = config;
+ this.user = user;
+ this.queue = queue;
+ this.submissionContext = submissionContext;
+ this.clientTokenStr = clientTokenStr;
+ this.appStore = appStore;
+ this.amLivelinessMonitor = amLivelinessMonitor;
+ this.scheduler = scheduler;
+ this.masterService = masterService;
+ this.startTime = System.currentTimeMillis();
+
+ this.maxRetries = conf.getInt(RMConfig.AM_MAX_RETRIES,
+ RMConfig.DEFAULT_AM_MAX_RETRIES);
+
+ ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+ this.readLock = lock.readLock();
+ this.writeLock = lock.writeLock();
+
+ this.stateMachine = stateMachineFactory.make(this);
+ }
+
+ @Override
+ public ApplicationId getApplicationId() {
+ return this.applicationId;
+ }
+
+ @Override
+ public RMAppState getState() {
+ this.readLock.lock();
+
+ try {
+ return this.stateMachine.getCurrentState();
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getUser() {
+ return this.user;
+ }
+
+ @Override
+ public float getProgress() {
+ this.readLock.lock();
+
+ try {
+ if (this.currentAttempt != null) {
+ return this.currentAttempt.getProgress();
+ }
+ return 0;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public RMAppAttempt getRMAppAttempt(ApplicationAttemptId appAttemptId) {
+ this.readLock.lock();
+
+ try {
+ return this.attempts.get(appAttemptId);
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getQueue() {
+ return this.queue;
+ }
+
+ @Override
+ public String getName() {
+ return this.name;
+ }
+
+ @Override
+ public RMAppAttempt getCurrentAppAttempt() {
+ this.readLock.lock();
+
+ try {
+ return this.currentAttempt;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public ApplicationStore getApplicationStore() {
+ return this.appStore;
+ }
+
+ private ApplicationState createApplicationState(RMAppState rmAppState) {
+ switch(rmAppState) {
+ case NEW:
+ return ApplicationState.NEW;
+ case SUBMITTED:
+ case ACCEPTED:
+ return ApplicationState.SUBMITTED;
+ case RESTARTING:
+ return ApplicationState.RESTARTING;
+ case RUNNING:
+ return ApplicationState.RUNNING;
+ case FINISHED:
+ return ApplicationState.SUCCEEDED;
+ case KILLED:
+ return ApplicationState.KILLED;
+ case FAILED:
+ return ApplicationState.FAILED;
+ }
+ throw new YarnException("Unknown state passed!");
+ }
+
+ @Override
+ public ApplicationReport createAndGetApplicationReport() {
+ this.readLock.lock();
+
+ try {
+ String clientToken = "N/A";
+ String trackingUrl = "N/A";
+ String host = "N/A";
+ int rpcPort = -1;
+ if (this.currentAttempt != null) {
+ trackingUrl = this.currentAttempt.getTrackingUrl();
+ clientToken = this.currentAttempt.getClientToken();
+ host = this.currentAttempt.getHost();
+ rpcPort = this.currentAttempt.getRpcPort();
+ }
+ return BuilderUtils.newApplicationReport(this.applicationId, this.user,
+ this.queue, this.name, host, rpcPort, clientToken,
+ createApplicationState(this.stateMachine.getCurrentState()),
+ this.diagnostics.toString(), trackingUrl);
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public long getFinishTime() {
+ this.readLock.lock();
+
+ try {
+ return this.finishTime;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public long getStartTime() {
+ this.readLock.lock();
+
+ try {
+ return this.startTime;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getTrackingUrl() {
+ this.readLock.lock();
+
+ try {
+ if (this.currentAttempt != null) {
+ return this.currentAttempt.getTrackingUrl();
+ }
+ return null;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public StringBuilder getDiagnostics() {
+ this.readLock.lock();
+
+ try {
+ return this.diagnostics;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
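+  // Events are applied to the state machine under the write lock. An
+  // invalid transition is logged but does not yet fail the application
+  // (see the TODO below).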
+ @Override
+ public void handle(RMAppEvent event) {
+
+ this.writeLock.lock();
+
+ try {
+ ApplicationId appID = event.getApplicationId();
+ LOG.info("Processing event for " + appID + " of type "
+ + event.getType());
+ final RMAppState oldState = getState();
+ try {
+ /* keep the master in sync with the state machine */
+ this.stateMachine.doTransition(event.getType(), event);
+ } catch (InvalidStateTransitonException e) {
+ LOG.error("Can't handle this event at current state", e);
+ /* TODO fail the application on the failed transition */
+ }
+
+ if (oldState != getState()) {
+ LOG.info(appID + " State change from " + oldState + " to "
+ + getState());
+ }
+ } finally {
+ this.writeLock.unlock();
+ }
+ }
+
+ private static class RMAppTransition implements
+ SingleArcTransition<RMAppImpl, RMAppEvent> {
+ public void transition(RMAppImpl app, RMAppEvent event) {
+    }
+  }
+
+ private static final class StartAppAttemptTransition extends RMAppTransition {
+ public void transition(RMAppImpl app, RMAppEvent event) {
+
+ ApplicationAttemptId appAttemptId = Records
+ .newRecord(ApplicationAttemptId.class);
+ appAttemptId.setApplicationId(app.applicationId);
+ appAttemptId.setAttemptId(app.attempts.size() + 1);
+
+ RMAppAttempt attempt = new RMAppAttemptImpl(appAttemptId,
+ app.clientTokenStr, app.rmContext, app.scheduler,
+ app.masterService, app.submissionContext);
+ app.attempts.put(appAttemptId, attempt);
+ app.currentAttempt = attempt;
+ app.dispatcher.getEventHandler().handle(
+ new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.START));
+    }
+ }
+
+ private static final class AppKilledTransition extends FinalTransition {
+ public void transition(RMAppImpl app, RMAppEvent event) {
+ app.diagnostics.append("Application killed by user.");
+ super.transition(app, event);
+    }
+ }
+
+  private static final class AppRejectedTransition extends FinalTransition {
+ public void transition(RMAppImpl app, RMAppEvent event) {
+ RMAppRejectedEvent rejectedEvent = (RMAppRejectedEvent)event;
+ app.diagnostics.append(rejectedEvent.getMessage());
+ super.transition(app, event);
+    }
+ }
+
+ private static class FinalTransition extends RMAppTransition {
+
+ private Set<NodeId> getNodesOnWhichAttemptRan(RMAppImpl app) {
+ Set<NodeId> nodes = new HashSet<NodeId>();
+ for (RMAppAttempt attempt : app.attempts.values()) {
+ nodes.addAll(attempt.getRanNodes());
+ }
+ return nodes;
+ }
+
+ public void transition(RMAppImpl app, RMAppEvent event) {
+ Set<NodeId> nodes = getNodesOnWhichAttemptRan(app);
+ for (NodeId nodeId : nodes) {
+ app.dispatcher.getEventHandler().handle(
+ new RMNodeCleanAppEvent(nodeId, app.applicationId));
+ }
+ app.finishTime = System.currentTimeMillis();
+    }
+ }
+
+ private static final class AttemptFailedTransition implements
+ MultipleArcTransition<RMAppImpl, RMAppEvent, RMAppState> {
+
+ private final RMAppState initialState;
+
+ public AttemptFailedTransition(RMAppState initialState) {
+ this.initialState = initialState;
+ }
+
+ @Override
+ public RMAppState transition(RMAppImpl app, RMAppEvent event) {
+
+ if (app.attempts.size() == app.maxRetries) {
+ app.diagnostics.append("Application " + app.getApplicationId()
+ + " failed " + app.maxRetries
+ + " times. Failing the application.");
+ // Inform the node for app-finish
+ FINAL_TRANSITION.transition(app, event);
+ return RMAppState.FAILED;
+ }
+
+ ApplicationAttemptId appAttemptId = Records
+ .newRecord(ApplicationAttemptId.class);
+ appAttemptId.setApplicationId(app.applicationId);
+ appAttemptId.setAttemptId(app.attempts.size() + 1);
+
+ // Create a new attempt.
+ RMAppAttempt attempt = new RMAppAttemptImpl(appAttemptId,
+ app.clientTokenStr, app.rmContext, app.scheduler,
+ app.masterService, app.submissionContext);
+ app.attempts.put(appAttemptId, attempt);
+ app.currentAttempt = attempt;
+ app.dispatcher.getEventHandler().handle(
+ new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.START));
+ return initialState;
+ }
+
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRejectedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRejectedEvent.java
new file mode 100644
index 0000000..38116b1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRejectedEvent.java
@@ -0,0 +1,17 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public class RMAppRejectedEvent extends RMAppEvent {
+
+ private final String message;
+
+ public RMAppRejectedEvent(ApplicationId appId, String message) {
+ super(appId, RMAppEventType.APP_REJECTED);
+ this.message = message;
+ }
+
+ public String getMessage() {
+ return this.message;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppState.java
new file mode 100644
index 0000000..471b583
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppState.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+public enum RMAppState {
+ NEW, SUBMITTED, ACCEPTED, RUNNING, RESTARTING, FINISHED, FAILED, KILLED
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AMLivelinessMonitor.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AMLivelinessMonitor.java
new file mode 100644
index 0000000..63775ac
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AMLivelinessMonitor.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.SystemClock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor;
+
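+/**
+ * Tracks ApplicationMaster liveliness; when an attempt's heartbeat lapses
+ * past the expiry interval, an EXPIRE event is dispatched for that attempt.
+ */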
+public class AMLivelinessMonitor extends AbstractLivelinessMonitor<ApplicationAttemptId> {
+
+ private EventHandler dispatcher;
+
+ public AMLivelinessMonitor(Dispatcher d) {
+ super("AMLivelinessMonitor", new SystemClock());
+ this.dispatcher = d.getEventHandler();
+ }
+
+ public void init(Configuration conf) {
+ super.init(conf);
+ setExpireInterval(conf.getInt(YarnConfiguration.AM_EXPIRY_INTERVAL,
+ RMConfig.DEFAULT_AM_EXPIRY_INTERVAL));
+ setMonitorInterval(conf.getInt(RMConfig.AMLIVELINESS_MONITORING_INTERVAL,
+ RMConfig.DEFAULT_AMLIVELINESS_MONITORING_INTERVAL));
+ }
+
+ @Override
+ protected void expire(ApplicationAttemptId id) {
+ dispatcher.handle(
+ new RMAppAttemptEvent(id, RMAppAttemptEventType.EXPIRE));
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
new file mode 100644
index 0000000..2e48abd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
@@ -0,0 +1,39 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.EventHandler;
+
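+/**
+ * Interface to a single attempt of an application. Exposes the attempt's
+ * state, AM host and RPC port, tracking URL, containers and the nodes the
+ * attempt's containers ran on.
+ */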
+public interface RMAppAttempt extends EventHandler<RMAppAttemptEvent> {
+
+ ApplicationAttemptId getAppAttemptId();
+
+ RMAppAttemptState getAppAttemptState();
+
+ String getHost();
+
+ int getRpcPort();
+
+ String getTrackingUrl();
+
+ String getClientToken();
+
+ StringBuilder getDiagnostics();
+
+ float getProgress();
+
+ Set<NodeId> getRanNodes();
+
+ List<Container> pullJustFinishedContainers();
+
+ List<Container> getJustFinishedContainers();
+
+ Container getMasterContainer();
+
+ ApplicationSubmissionContext getSubmissionContext();
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEvent.java
new file mode 100644
index 0000000..c5a3f40
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEvent.java
@@ -0,0 +1,19 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class RMAppAttemptEvent extends AbstractEvent<RMAppAttemptEventType> {
+
+ private final ApplicationAttemptId appAttemptId;
+
+ public RMAppAttemptEvent(ApplicationAttemptId appAttemptId,
+ RMAppAttemptEventType type) {
+ super(type);
+ this.appAttemptId = appAttemptId;
+ }
+
+ public ApplicationAttemptId getApplicationAttemptId() {
+ return this.appAttemptId;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEventType.java
new file mode 100644
index 0000000..38a17a2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEventType.java
@@ -0,0 +1,29 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+
+public enum RMAppAttemptEventType {
+ // Source: RMApp
+ START,
+ KILL,
+
+ // Source: AMLauncher
+ LAUNCHED,
+ LAUNCH_FAILED,
+
+ // Source: AMLivelinessMonitor
+ EXPIRE,
+
+ // Source: ApplicationMasterService
+ REGISTERED,
+ STATUS_UPDATE,
+ UNREGISTERED,
+
+ // Source: Containers
+ CONTAINER_ACQUIRED,
+ CONTAINER_ALLOCATED,
+ CONTAINER_FINISHED,
+
+ // Source: Scheduler
+ APP_REJECTED,
+  APP_ACCEPTED
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
new file mode 100644
index 0000000..3daf161
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -0,0 +1,707 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAcquiredEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRejectedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStatusupdateEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+
+public class RMAppAttemptImpl implements RMAppAttempt {
+
+ private static final Log LOG = LogFactory.getLog(RMAppAttemptImpl.class);
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ public final static Priority AM_CONTAINER_PRIORITY = recordFactory
+ .newRecordInstance(Priority.class);
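+  // The AM container is always requested at priority 0.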
+ static {
+ AM_CONTAINER_PRIORITY.setPriority(0);
+ }
+
+ private final StateMachine<RMAppAttemptState,
+ RMAppAttemptEventType,
+ RMAppAttemptEvent> stateMachine;
+
+ private final RMContext rmContext;
+ private final EventHandler eventHandler;
+ private final YarnScheduler scheduler;
+ private final ApplicationMasterService masterService;
+
+ private final ReadLock readLock;
+ private final WriteLock writeLock;
+
+ private final ApplicationAttemptId applicationAttemptId;
+ private final String clientToken;
+ private final ApplicationSubmissionContext submissionContext;
+
+  // nodes on which this attempt's containers ran
+ private final Set<NodeId> ranNodes =
+ new HashSet<NodeId>();
+ private final List<Container> justFinishedContainers =
+ new ArrayList<Container>();
+ private Container masterContainer;
+
+ private float progress = 0;
+ private String host;
+ private int rpcPort;
+ private String trackingUrl;
+ private String finalState;
+ private final StringBuilder diagnostics = new StringBuilder();
+
+ private static final StateMachineFactory<RMAppAttemptImpl,
+ RMAppAttemptState,
+ RMAppAttemptEventType,
+ RMAppAttemptEvent>
+ stateMachineFactory = new StateMachineFactory<RMAppAttemptImpl,
+ RMAppAttemptState,
+ RMAppAttemptEventType,
+ RMAppAttemptEvent>(RMAppAttemptState.NEW)
+
+ // Transitions from NEW State
+ .addTransition(RMAppAttemptState.NEW, RMAppAttemptState.SUBMITTED,
+ RMAppAttemptEventType.START, new AttemptStartedTransition())
+ .addTransition(RMAppAttemptState.NEW, RMAppAttemptState.KILLED,
+ RMAppAttemptEventType.KILL)
+
+ // Transitions from SUBMITTED state
+ .addTransition(RMAppAttemptState.SUBMITTED, RMAppAttemptState.FAILED,
+ RMAppAttemptEventType.APP_REJECTED, new AppRejectedTransition())
+ .addTransition(RMAppAttemptState.SUBMITTED, RMAppAttemptState.SCHEDULED,
+ RMAppAttemptEventType.APP_ACCEPTED, new ScheduleTransition())
+ .addTransition(RMAppAttemptState.SUBMITTED, RMAppAttemptState.KILLED,
+ RMAppAttemptEventType.KILL,
+ new BaseFinalTransition(RMAppAttemptState.KILLED))
+
+ // Transitions from SCHEDULED State
+ .addTransition(RMAppAttemptState.SCHEDULED,
+ RMAppAttemptState.ALLOCATED,
+ RMAppAttemptEventType.CONTAINER_ALLOCATED,
+ new AMContainerAllocatedTransition())
+ .addTransition(RMAppAttemptState.SCHEDULED, RMAppAttemptState.KILLED,
+ RMAppAttemptEventType.KILL,
+ new BaseFinalTransition(RMAppAttemptState.KILLED))
+
+ // Transitions from ALLOCATED State
+ .addTransition(RMAppAttemptState.ALLOCATED,
+ RMAppAttemptState.ALLOCATED,
+ RMAppAttemptEventType.CONTAINER_ACQUIRED,
+ new ContainerAcquiredTransition())
+ .addTransition(RMAppAttemptState.ALLOCATED, RMAppAttemptState.LAUNCHED,
+ RMAppAttemptEventType.LAUNCHED, new AMLaunchedTransition())
+ .addTransition(RMAppAttemptState.ALLOCATED, RMAppAttemptState.FAILED,
+ RMAppAttemptEventType.LAUNCH_FAILED, new LaunchFailedTransition())
+ .addTransition(RMAppAttemptState.ALLOCATED, RMAppAttemptState.KILLED,
+ RMAppAttemptEventType.KILL, new KillAllocatedAMTransition())
+
+ // Transitions from LAUNCHED State
+ .addTransition(RMAppAttemptState.LAUNCHED, RMAppAttemptState.RUNNING,
+ RMAppAttemptEventType.REGISTERED, new AMRegisteredTransition())
+ .addTransition(RMAppAttemptState.LAUNCHED, RMAppAttemptState.FAILED,
+ RMAppAttemptEventType.CONTAINER_FINISHED,
+ new AMContainerCrashedTransition())
+ .addTransition(
+ RMAppAttemptState.LAUNCHED, RMAppAttemptState.FAILED,
+ RMAppAttemptEventType.EXPIRE,
+ new FinalTransition(RMAppAttemptState.FAILED))
+ .addTransition(RMAppAttemptState.LAUNCHED, RMAppAttemptState.KILLED,
+ RMAppAttemptEventType.KILL,
+ new FinalTransition(RMAppAttemptState.KILLED))
+
+ // Transitions from RUNNING State
+ .addTransition(RMAppAttemptState.RUNNING, RMAppAttemptState.FINISHED,
+ RMAppAttemptEventType.UNREGISTERED, new AMUnregisteredTransition())
+ .addTransition(RMAppAttemptState.RUNNING, RMAppAttemptState.RUNNING,
+ RMAppAttemptEventType.STATUS_UPDATE, new StatusUpdateTransition())
+ .addTransition(RMAppAttemptState.RUNNING, RMAppAttemptState.RUNNING,
+ RMAppAttemptEventType.CONTAINER_ALLOCATED)
+ .addTransition(
+ RMAppAttemptState.RUNNING, RMAppAttemptState.RUNNING,
+ RMAppAttemptEventType.CONTAINER_ACQUIRED,
+ new ContainerAcquiredTransition())
+ .addTransition(
+ RMAppAttemptState.RUNNING,
+ EnumSet.of(RMAppAttemptState.RUNNING, RMAppAttemptState.FAILED),
+ RMAppAttemptEventType.CONTAINER_FINISHED,
+ new ContainerFinishedTransition())
+ .addTransition(
+ RMAppAttemptState.RUNNING, RMAppAttemptState.FAILED,
+ RMAppAttemptEventType.EXPIRE,
+ new FinalTransition(RMAppAttemptState.FAILED))
+ .addTransition(
+ RMAppAttemptState.RUNNING, RMAppAttemptState.KILLED,
+ RMAppAttemptEventType.KILL,
+ new FinalTransition(RMAppAttemptState.KILLED))
+
+ // Transitions from FAILED State
+ .addTransition(
+ RMAppAttemptState.FAILED,
+ RMAppAttemptState.FAILED,
+ EnumSet.of(
+ RMAppAttemptEventType.EXPIRE,
+ RMAppAttemptEventType.KILL,
+ RMAppAttemptEventType.UNREGISTERED,
+ RMAppAttemptEventType.STATUS_UPDATE,
+ RMAppAttemptEventType.CONTAINER_ALLOCATED,
+ RMAppAttemptEventType.CONTAINER_FINISHED))
+
+ // Transitions from FINISHED State
+ .addTransition(
+ RMAppAttemptState.FINISHED,
+ RMAppAttemptState.FINISHED,
+ EnumSet.of(
+ RMAppAttemptEventType.EXPIRE,
+ RMAppAttemptEventType.UNREGISTERED,
+ RMAppAttemptEventType.CONTAINER_ALLOCATED,
+ RMAppAttemptEventType.CONTAINER_FINISHED,
+ RMAppAttemptEventType.KILL))
+
+ // Transitions from KILLED State
+ .addTransition(
+ RMAppAttemptState.KILLED,
+ RMAppAttemptState.KILLED,
+      EnumSet.of(RMAppAttemptEventType.EXPIRE,
+          RMAppAttemptEventType.LAUNCHED,
+          RMAppAttemptEventType.LAUNCH_FAILED,
+ RMAppAttemptEventType.REGISTERED,
+ RMAppAttemptEventType.CONTAINER_ALLOCATED,
+ RMAppAttemptEventType.CONTAINER_FINISHED,
+ RMAppAttemptEventType.UNREGISTERED,
+ RMAppAttemptEventType.KILL,
+ RMAppAttemptEventType.STATUS_UPDATE))
+
+ .installTopology();
+
+ public RMAppAttemptImpl(ApplicationAttemptId appAttemptId,
+ String clientToken, RMContext rmContext, YarnScheduler scheduler,
+ ApplicationMasterService masterService,
+ ApplicationSubmissionContext submissionContext) {
+
+ this.applicationAttemptId = appAttemptId;
+ this.rmContext = rmContext;
+ this.eventHandler = rmContext.getDispatcher().getEventHandler();
+ this.submissionContext = submissionContext;
+ this.scheduler = scheduler;
+ this.masterService = masterService;
+ this.clientToken = clientToken;
+
+ ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+ this.readLock = lock.readLock();
+ this.writeLock = lock.writeLock();
+
+ this.stateMachine = stateMachineFactory.make(this);
+ }
+
+ @Override
+ public ApplicationAttemptId getAppAttemptId() {
+ return this.applicationAttemptId;
+ }
+
+ @Override
+ public ApplicationSubmissionContext getSubmissionContext() {
+ return this.submissionContext;
+ }
+
+ @Override
+ public RMAppAttemptState getAppAttemptState() {
+ this.readLock.lock();
+ try {
+ return this.stateMachine.getCurrentState();
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getHost() {
+ this.readLock.lock();
+
+ try {
+ return this.host;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public int getRpcPort() {
+ this.readLock.lock();
+
+ try {
+ return this.rpcPort;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getTrackingUrl() {
+ this.readLock.lock();
+
+ try {
+ return this.trackingUrl;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getClientToken() {
+ return this.clientToken;
+ }
+
+ @Override
+ public StringBuilder getDiagnostics() {
+ this.readLock.lock();
+
+ try {
+ return this.diagnostics;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public float getProgress() {
+ this.readLock.lock();
+
+ try {
+ return this.progress;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public List<Container> getJustFinishedContainers() {
+ this.readLock.lock();
+ try {
+ return this.justFinishedContainers;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public List<Container> pullJustFinishedContainers() {
+ this.writeLock.lock();
+
+ try {
+ List<Container> returnList = new ArrayList<Container>(
+ this.justFinishedContainers.size());
+ returnList.addAll(this.justFinishedContainers);
+ this.justFinishedContainers.clear();
+ return returnList;
+ } finally {
+ this.writeLock.unlock();
+ }
+ }
+
+ @Override
+ public Set<NodeId> getRanNodes() {
+ return ranNodes;
+ }
+
+ @Override
+ public Container getMasterContainer() {
+ this.readLock.lock();
+
+ try {
+ return this.masterContainer;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public void handle(RMAppAttemptEvent event) {
+
+ this.writeLock.lock();
+
+ try {
+ ApplicationAttemptId appAttemptID = event.getApplicationAttemptId();
+ LOG.info("Processing event for " + appAttemptID + " of type "
+ + event.getType());
+ final RMAppAttemptState oldState = getAppAttemptState();
+ try {
+ /* keep the master in sync with the state machine */
+ this.stateMachine.doTransition(event.getType(), event);
+ } catch (InvalidStateTransitonException e) {
+ LOG.error("Can't handle this event at current state", e);
+ /* TODO fail the application on the failed transition */
+ }
+
+ if (oldState != getAppAttemptState()) {
+ LOG.info(appAttemptID + " State change from " + oldState + " to "
+ + getAppAttemptState());
+ }
+ } finally {
+ this.writeLock.unlock();
+ }
+ }
+
+ private static class BaseTransition implements
+ SingleArcTransition<RMAppAttemptImpl, RMAppAttemptEvent> {
+
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+ }
+
+ }
+
+ private static final class AttemptStartedTransition extends BaseTransition {
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ // Register with the ApplicationMasterService
+ appAttempt.masterService
+ .registerAppAttempt(appAttempt.applicationAttemptId);
+
+ // Add the application to the scheduler
+ appAttempt.eventHandler.handle(
+ new AppAddedSchedulerEvent(appAttempt.applicationAttemptId,
+ appAttempt.submissionContext.getQueue(),
+ appAttempt.submissionContext.getUser()));
+ }
+ }
+
+ private static final class AppRejectedTransition extends BaseTransition {
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ RMAppAttemptRejectedEvent rejectedEvent = (RMAppAttemptRejectedEvent) event;
+ // Send the rejection event to app
+ appAttempt.eventHandler.handle(new RMAppRejectedEvent(rejectedEvent
+ .getApplicationAttemptId().getApplicationId(), rejectedEvent
+ .getMessage()));
+ }
+ }
+
+ private static final List<ContainerId> EMPTY_CONTAINER_RELEASE_LIST =
+ new ArrayList<ContainerId>();
+ private static final List<ResourceRequest> EMPTY_CONTAINER_REQUEST_LIST =
+ new ArrayList<ResourceRequest>();
+
+ private static final class ScheduleTransition extends BaseTransition {
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ // Send the acceptance to the app
+ appAttempt.eventHandler.handle(new RMAppEvent(event
+ .getApplicationAttemptId().getApplicationId(),
+ RMAppEventType.APP_ACCEPTED));
+
+ // Request a container for the AM.
+ ResourceRequest request = BuilderUtils.newResourceRequest(
+ AM_CONTAINER_PRIORITY, "*", appAttempt.submissionContext
+ .getMasterCapability(), 1);
+ LOG.debug("About to request resources for AM of "
+ + appAttempt.applicationAttemptId + " required " + request);
+
+ appAttempt.scheduler.allocate(appAttempt.applicationAttemptId,
+ Collections.singletonList(request), EMPTY_CONTAINER_RELEASE_LIST);
+ }
+ }
+
+ private static final class AMContainerAllocatedTransition extends BaseTransition {
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ // Acquire the AM container from the scheduler.
+ Allocation amContainerAllocation = appAttempt.scheduler.allocate(
+ appAttempt.applicationAttemptId, EMPTY_CONTAINER_REQUEST_LIST,
+ EMPTY_CONTAINER_RELEASE_LIST);
+
+ // Set the masterContainer
+ appAttempt.masterContainer = amContainerAllocation.getContainers().get(
+ 0);
+
+ // Send event to launch the AM Container
+ appAttempt.eventHandler.handle(new AMLauncherEvent(
+ AMLauncherEventType.LAUNCH, appAttempt));
+ }
+ }
+
+ private static class BaseFinalTransition extends BaseTransition {
+
+ private final RMAppAttemptState finalAttemptState;
+
+ public BaseFinalTransition(RMAppAttemptState finalAttemptState) {
+ this.finalAttemptState = finalAttemptState;
+ }
+
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ // Tell the AMS. Unregister from the ApplicationMasterService
+ appAttempt.masterService
+ .unregisterAttempt(appAttempt.applicationAttemptId);
+
+ // Tell the application and the scheduler
+ RMAppEventType eventToApp = null;
+ switch (finalAttemptState) {
+ case FINISHED:
+ eventToApp = RMAppEventType.ATTEMPT_FINISHED;
+ break;
+ case KILLED:
+ eventToApp = RMAppEventType.ATTEMPT_KILLED;
+ break;
+ case FAILED:
+ eventToApp = RMAppEventType.ATTEMPT_FAILED;
+ break;
+ default:
+ LOG.info("Cannot get this state!! Error!!");
+ break;
+ }
+ appAttempt.eventHandler.handle(new RMAppEvent(
+ appAttempt.applicationAttemptId.getApplicationId(), eventToApp));
+ appAttempt.eventHandler.handle(new AppRemovedSchedulerEvent(appAttempt
+ .getAppAttemptId(), finalAttemptState));
+ }
+ }
+
+ private static final class AMLaunchedTransition extends BaseTransition {
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ // Register with AMLivelinessMonitor
+ appAttempt.rmContext.getAMLivelinessMonitor().register(
+ appAttempt.applicationAttemptId);
+
+ }
+ }
+
+ private static final class LaunchFailedTransition extends BaseFinalTransition {
+
+ public LaunchFailedTransition() {
+ super(RMAppAttemptState.FAILED);
+ }
+
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ // Use diagnostic from launcher
+      RMAppAttemptLaunchFailedEvent launchFailedEvent
+          = (RMAppAttemptLaunchFailedEvent) event;
+      appAttempt.diagnostics.append(launchFailedEvent.getMessage());
+
+ // Tell the app, scheduler
+ super.transition(appAttempt, event);
+
+ }
+ }
+
+ private static final class KillAllocatedAMTransition extends
+ BaseFinalTransition {
+ public KillAllocatedAMTransition() {
+ super(RMAppAttemptState.KILLED);
+ }
+
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ // Tell the application and scheduler
+ super.transition(appAttempt, event);
+
+ // Tell the launcher to cleanup.
+ appAttempt.eventHandler.handle(new AMLauncherEvent(
+ AMLauncherEventType.CLEANUP, appAttempt));
+
+ }
+ }
+
+ private static final class AMRegisteredTransition extends BaseTransition {
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ RMAppAttemptRegistrationEvent registrationEvent
+ = (RMAppAttemptRegistrationEvent) event;
+ appAttempt.host = registrationEvent.getHost();
+ appAttempt.rpcPort = registrationEvent.getRpcport();
+ appAttempt.trackingUrl = registrationEvent.getTrackingurl();
+
+ // Let the app know
+ appAttempt.eventHandler.handle(new RMAppEvent(appAttempt
+ .getAppAttemptId().getApplicationId(),
+ RMAppEventType.ATTEMPT_REGISTERED));
+ }
+ }
+
+ private static final class AMContainerCrashedTransition extends
+ BaseFinalTransition {
+
+ public AMContainerCrashedTransition() {
+ super(RMAppAttemptState.FAILED);
+ }
+
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ // UnRegister from AMLivelinessMonitor
+ appAttempt.rmContext.getAMLivelinessMonitor().unregister(
+ appAttempt.getAppAttemptId());
+
+ // Tell the app, scheduler
+ super.transition(appAttempt, event);
+
+ // Use diagnostic saying crashed.
+ appAttempt.diagnostics.append("AM Container for "
+ + appAttempt.getAppAttemptId() + " exited. Failing this attempt.");
+ }
+ }
+
+ private static class FinalTransition extends BaseFinalTransition {
+
+ public FinalTransition(RMAppAttemptState finalAttemptState) {
+ super(finalAttemptState);
+ }
+
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ // Tell the app and the scheduler
+ super.transition(appAttempt, event);
+
+ // UnRegister from AMLivelinessMonitor
+ appAttempt.rmContext.getAMLivelinessMonitor().unregister(
+ appAttempt.getAppAttemptId());
+
+ // Tell the launcher to cleanup.
+ appAttempt.eventHandler.handle(new AMLauncherEvent(
+ AMLauncherEventType.CLEANUP, appAttempt));
+ }
+ }
+
+ private static final class StatusUpdateTransition extends
+ BaseTransition {
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ RMAppAttemptStatusupdateEvent statusUpdateEvent
+ = (RMAppAttemptStatusupdateEvent) event;
+
+ // Update progress
+ appAttempt.progress = statusUpdateEvent.getProgress();
+
+ // Ping to AMLivelinessMonitor
+ appAttempt.rmContext.getAMLivelinessMonitor().receivedPing(
+ statusUpdateEvent.getApplicationAttemptId());
+ }
+ }
+
+ private static final class AMUnregisteredTransition extends FinalTransition {
+
+ public AMUnregisteredTransition() {
+ super(RMAppAttemptState.FINISHED);
+ }
+
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ RMAppAttemptUnregistrationEvent unregisterEvent
+ = (RMAppAttemptUnregistrationEvent) event;
+ appAttempt.diagnostics.append(unregisterEvent.getDiagnostics());
+ appAttempt.trackingUrl = unregisterEvent.getTrackingUrl();
+ appAttempt.finalState = unregisterEvent.getFinalState();
+
+ // Tell the app and the scheduler
+ super.transition(appAttempt, event);
+ }
+ }
+
+ private static final class ContainerAcquiredTransition extends
+ BaseTransition {
+ @Override
+ public void transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+ RMAppAttemptContainerAcquiredEvent acquiredEvent
+ = (RMAppAttemptContainerAcquiredEvent) event;
+ appAttempt.ranNodes.add(acquiredEvent.getContainer().getNodeId());
+ }
+ }
+
+ private static final class ContainerFinishedTransition
+ implements
+ MultipleArcTransition<RMAppAttemptImpl, RMAppAttemptEvent, RMAppAttemptState> {
+
+ @Override
+ public RMAppAttemptState transition(RMAppAttemptImpl appAttempt,
+ RMAppAttemptEvent event) {
+
+ RMAppAttemptContainerFinishedEvent containerFinishedEvent
+ = (RMAppAttemptContainerFinishedEvent) event;
+ Container container = containerFinishedEvent.getContainer();
+
+      // If the finished container is the AM container itself, the whole
+      // attempt fails.
+ if (appAttempt.masterContainer.getId().equals(container.getId())) {
+ new FinalTransition(RMAppAttemptState.FAILED).transition(
+ appAttempt, containerFinishedEvent);
+ return RMAppAttemptState.FAILED;
+ }
+
+ // Normal container.
+
+      // Add it to the list of just-finished containers.
+ appAttempt.justFinishedContainers.add(container);
+ return RMAppAttemptState.RUNNING;
+ }
+ }
+}
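
The class above encodes the entire attempt lifecycle as a declarative transition table. For reference, a minimal sketch of the same StateMachineFactory pattern on a hypothetical two-state machine (DemoState, DemoEventType and DemoEvent are illustrative names, not part of this patch; only the yarn.state API calls mirror the code above):

    import org.apache.hadoop.yarn.event.AbstractEvent;
    import org.apache.hadoop.yarn.state.SingleArcTransition;
    import org.apache.hadoop.yarn.state.StateMachine;
    import org.apache.hadoop.yarn.state.StateMachineFactory;

    public class LifecycleSketch {

      enum DemoState { NEW, STARTED }
      enum DemoEventType { START }

      static class DemoEvent extends AbstractEvent<DemoEventType> {
        DemoEvent(DemoEventType type) { super(type); }
      }

      // One shared, static factory, as in RMAppAttemptImpl:
      // installTopology() validates the table once; make() stamps out a
      // per-instance state machine.
      private static final StateMachineFactory<LifecycleSketch, DemoState,
          DemoEventType, DemoEvent> factory =
        new StateMachineFactory<LifecycleSketch, DemoState,
            DemoEventType, DemoEvent>(DemoState.NEW)
          .addTransition(DemoState.NEW, DemoState.STARTED,
              DemoEventType.START,
              new SingleArcTransition<LifecycleSketch, DemoEvent>() {
                @Override
                public void transition(LifecycleSketch operand,
                    DemoEvent event) {
                  // Side effects of the transition go here.
                }
              })
          .installTopology();

      private final StateMachine<DemoState, DemoEventType, DemoEvent>
          stateMachine = factory.make(this);

      public static void main(String[] args) throws Exception {
        LifecycleSketch sketch = new LifecycleSketch();
        sketch.stateMachine.doTransition(DemoEventType.START,
            new DemoEvent(DemoEventType.START));
        System.out.println(sketch.stateMachine.getCurrentState()); // STARTED
      }
    }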
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptState.java
new file mode 100644
index 0000000..5d81360
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptState.java
@@ -0,0 +1,6 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+
+public enum RMAppAttemptState {
+ NEW, SUBMITTED, SCHEDULED, ALLOCATED, LAUNCHED, FAILED, RUNNING, FINISHED,
+ KILLED,
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerAcquiredEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerAcquiredEvent.java
new file mode 100644
index 0000000..68d3a97
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerAcquiredEvent.java
@@ -0,0 +1,22 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+
+public class RMAppAttemptContainerAcquiredEvent extends RMAppAttemptEvent {
+
+ private final Container container;
+
+ public RMAppAttemptContainerAcquiredEvent(ApplicationAttemptId appAttemptId,
+ Container container) {
+ super(appAttemptId, RMAppAttemptEventType.CONTAINER_ACQUIRED);
+ this.container = container;
+ }
+
+ public Container getContainer() {
+ return this.container;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerAllocatedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerAllocatedEvent.java
new file mode 100644
index 0000000..fb38fa4
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerAllocatedEvent.java
@@ -0,0 +1,22 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+
+public class RMAppAttemptContainerAllocatedEvent extends RMAppAttemptEvent {
+
+ private final Container container;
+
+ public RMAppAttemptContainerAllocatedEvent(ApplicationAttemptId appAttemptId,
+ Container container) {
+ super(appAttemptId, RMAppAttemptEventType.CONTAINER_ALLOCATED);
+ this.container = container;
+ }
+
+ public Container getContainer() {
+ return this.container;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerFinishedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerFinishedEvent.java
new file mode 100644
index 0000000..e6565e7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerFinishedEvent.java
@@ -0,0 +1,22 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+
+public class RMAppAttemptContainerFinishedEvent extends RMAppAttemptEvent {
+
+ private final Container container;
+
+ public RMAppAttemptContainerFinishedEvent(ApplicationAttemptId appAttemptId,
+ Container container) {
+ super(appAttemptId, RMAppAttemptEventType.CONTAINER_FINISHED);
+ this.container = container;
+ }
+
+ public Container getContainer() {
+ return this.container;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptLaunchFailedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptLaunchFailedEvent.java
new file mode 100644
index 0000000..104eeef
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptLaunchFailedEvent.java
@@ -0,0 +1,20 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+
+public class RMAppAttemptLaunchFailedEvent extends RMAppAttemptEvent {
+
+ private final String message;
+
+ public RMAppAttemptLaunchFailedEvent(ApplicationAttemptId appAttemptId,
+ String message) {
+ super(appAttemptId, RMAppAttemptEventType.LAUNCH_FAILED);
+ this.message = message;
+ }
+
+ public String getMessage() {
+ return this.message;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptRegistrationEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptRegistrationEvent.java
new file mode 100644
index 0000000..aa13775
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptRegistrationEvent.java
@@ -0,0 +1,34 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+
+public class RMAppAttemptRegistrationEvent extends RMAppAttemptEvent {
+
+  private final String host;
+  private final int rpcport;
+  private final String trackingurl;
+
+ public RMAppAttemptRegistrationEvent(ApplicationAttemptId appAttemptId,
+ String host, int rpcPort, String trackingUrl) {
+ super(appAttemptId, RMAppAttemptEventType.REGISTERED);
+ this.host = host;
+ this.rpcport = rpcPort;
+ this.trackingurl = trackingUrl;
+ }
+
+ public String getHost() {
+ return this.host;
+ }
+
+ public int getRpcport() {
+ return this.rpcport;
+ }
+
+ public String getTrackingurl() {
+ return this.trackingurl;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptRejectedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptRejectedEvent.java
new file mode 100644
index 0000000..5804c1a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptRejectedEvent.java
@@ -0,0 +1,19 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+
+public class RMAppAttemptRejectedEvent extends RMAppAttemptEvent {
+
+ private final String message;
+
+ public RMAppAttemptRejectedEvent(ApplicationAttemptId appAttemptId, String message) {
+ super(appAttemptId, RMAppAttemptEventType.APP_REJECTED);
+ this.message = message;
+ }
+
+ public String getMessage() {
+ return this.message;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptStatusupdateEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptStatusupdateEvent.java
new file mode 100644
index 0000000..d9c6cdf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptStatusupdateEvent.java
@@ -0,0 +1,21 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+
+public class RMAppAttemptStatusupdateEvent extends RMAppAttemptEvent {
+
+ private final float progress;
+
+ public RMAppAttemptStatusupdateEvent(ApplicationAttemptId appAttemptId,
+ float progress) {
+ super(appAttemptId, RMAppAttemptEventType.STATUS_UPDATE);
+ this.progress = progress;
+ }
+
+ public float getProgress() {
+ return this.progress;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptUnregistrationEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptUnregistrationEvent.java
new file mode 100644
index 0000000..abf8e97
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptUnregistrationEvent.java
@@ -0,0 +1,33 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+
+public class RMAppAttemptUnregistrationEvent extends RMAppAttemptEvent {
+
+ private final String trackingUrl;
+ private final String finalState;
+ private final String diagnostics;
+
+ public RMAppAttemptUnregistrationEvent(ApplicationAttemptId appAttemptId,
+ String trackingUrl, String finalState, String diagnostics) {
+ super(appAttemptId, RMAppAttemptEventType.UNREGISTERED);
+ this.trackingUrl = trackingUrl;
+ this.finalState = finalState;
+ this.diagnostics = diagnostics;
+ }
+
+ public String getTrackingUrl() {
+ return this.trackingUrl;
+ }
+
+ public String getFinalState() {
+ return this.finalState;
+ }
+
+ public String getDiagnostics() {
+ return this.diagnostics;
+ }
+
+}
\ No newline at end of file
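
Taken together, the registration, status-update and unregistration events above make up the AM's report-in protocol against RMAppAttemptImpl. A sketch of the happy-path sequence a caller would dispatch to an attempt that is already LAUNCHED; the host, port, URL and final-state strings are placeholder values:

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStatusupdateEvent;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;

    public class AmProtocolSketch {
      static void reportIn(RMAppAttemptImpl attempt, ApplicationAttemptId id) {
        // AM registers: LAUNCHED -> RUNNING; host, port and tracking URL
        // are recorded on the attempt.
        attempt.handle(new RMAppAttemptRegistrationEvent(
            id, "am-host.example.com", 4242, "http://am-host.example.com:8080"));

        // Periodic heartbeat: stays RUNNING, updates progress and pings
        // the AMLivelinessMonitor.
        attempt.handle(new RMAppAttemptStatusupdateEvent(id, 0.5f));

        // AM unregisters: RUNNING -> FINISHED, carrying final state and
        // diagnostics.
        attempt.handle(new RMAppAttemptUnregistrationEvent(
            id, "http://am-host.example.com:8080", "SUCCEEDED", ""));
      }
    }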
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/ContainerAllocationExpirer.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/ContainerAllocationExpirer.java
new file mode 100644
index 0000000..fdd467e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/ContainerAllocationExpirer.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.SystemClock;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
+import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor;
+
+public class ContainerAllocationExpirer extends
+ AbstractLivelinessMonitor<ContainerId> {
+
+ private EventHandler dispatcher;
+
+ public ContainerAllocationExpirer(Dispatcher d) {
+ super(ContainerAllocationExpirer.class.getName(), new SystemClock());
+ this.dispatcher = d.getEventHandler();
+ }
+
+ public void init(Configuration conf) {
+ super.init(conf);
+ setExpireInterval(conf.getInt(
+ RMConfig.CONTAINER_LIVELINESS_MONITORING_INTERVAL,
+ RMConfig.DEFAULT_CONTAINER_LIVELINESS_MONITORING_INTERVAL));
+ setMonitorInterval(conf.getInt(RMConfig.AMLIVELINESS_MONITORING_INTERVAL,
+ RMConfig.DEFAULT_AMLIVELINESS_MONITORING_INTERVAL));
+ }
+
+ @Override
+ protected void expire(ContainerId containerId) {
+ dispatcher.handle(new ContainerExpiredSchedulerEvent(containerId));
+ }
+}
\ No newline at end of file
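
A sketch of how the expirer is wired, based on init() above and the register()/unregister() calls visible in RMContainerImpl below; starting the underlying monitor service is omitted, and the dispatcher, configuration and container id are assumed to come from the surrounding RM context:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.event.Dispatcher;
    import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;

    public class ExpirerSketch {
      static ContainerAllocationExpirer wire(Dispatcher dispatcher,
          Configuration conf, ContainerId acquired) {
        ContainerAllocationExpirer expirer =
            new ContainerAllocationExpirer(dispatcher);
        expirer.init(conf); // picks up the liveliness intervals from RMConfig

        // A container is tracked from the moment an AM acquires it...
        expirer.register(acquired);
        // ...until the node reports that it actually launched.
        expirer.unregister(acquired);
        return expirer;
      }
    }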
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
new file mode 100644
index 0000000..b4822a8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
@@ -0,0 +1,27 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.EventHandler;
+
+public interface RMContainer extends EventHandler<RMContainerEvent> {
+
+ ContainerId getContainerId();
+
+ ApplicationAttemptId getApplicationAttemptId();
+
+ RMContainerState getState();
+
+ Container getContainer();
+
+ Resource getReservedResource();
+
+ NodeId getReservedNode();
+
+ Priority getReservedPriority();
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerEvent.java
new file mode 100644
index 0000000..ac0d014
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerEvent.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class RMContainerEvent extends AbstractEvent<RMContainerEventType> {
+
+ private final ContainerId containerId;
+
+ public RMContainerEvent(ContainerId containerId, RMContainerEventType type) {
+ super(type);
+ this.containerId = containerId;
+ }
+
+ public ContainerId getContainerId() {
+ return this.containerId;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerEventType.java
new file mode 100644
index 0000000..3181492
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerEventType.java
@@ -0,0 +1,21 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
+
+public enum RMContainerEventType {
+
+ // Source: scheduler
+ START,
+
+ // Source: SchedulerApp
+ ACQUIRED,
+ KILL, // Also from Node on NodeRemoval
+ RESERVED,
+
+ LAUNCHED,
+ FINISHED,
+
+ // Source: ApplicationMasterService->Scheduler
+ RELEASED,
+
+ // Source: ContainerAllocationExpirer
+ EXPIRE
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerFinishedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerFinishedEvent.java
new file mode 100644
index 0000000..88a3406
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerFinishedEvent.java
@@ -0,0 +1,19 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+
+public class RMContainerFinishedEvent extends RMContainerEvent {
+
+ private final ContainerStatus remoteContainerStatus;
+
+ public RMContainerFinishedEvent(ContainerId containerId,
+ ContainerStatus containerStatus) {
+ super(containerId, RMContainerEventType.FINISHED);
+ this.remoteContainerStatus = containerStatus;
+ }
+
+ public ContainerStatus getRemoteContainerStatus() {
+ return this.remoteContainerStatus;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
new file mode 100644
index 0000000..cc13642
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -0,0 +1,315 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
+
+import java.util.EnumSet;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAcquiredEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+
+public class RMContainerImpl implements RMContainer {
+
+ private static final Log LOG = LogFactory.getLog(RMContainerImpl.class);
+
+ private static final StateMachineFactory<RMContainerImpl, RMContainerState,
+ RMContainerEventType, RMContainerEvent>
+ stateMachineFactory = new StateMachineFactory<RMContainerImpl,
+ RMContainerState, RMContainerEventType, RMContainerEvent>(
+ RMContainerState.NEW)
+
+ // Transitions from NEW state
+ .addTransition(RMContainerState.NEW, RMContainerState.ALLOCATED,
+ RMContainerEventType.START, new ContainerStartedTransition())
+ .addTransition(RMContainerState.NEW, RMContainerState.KILLED,
+ RMContainerEventType.KILL)
+ .addTransition(RMContainerState.NEW, RMContainerState.RESERVED,
+ RMContainerEventType.RESERVED, new ContainerReservedTransition())
+
+ // Transitions from RESERVED state
+ .addTransition(RMContainerState.RESERVED, RMContainerState.RESERVED,
+ RMContainerEventType.RESERVED, new ContainerReservedTransition())
+ .addTransition(RMContainerState.RESERVED, RMContainerState.ALLOCATED,
+ RMContainerEventType.START, new ContainerStartedTransition())
+ .addTransition(RMContainerState.RESERVED, RMContainerState.KILLED,
+ RMContainerEventType.KILL) // nothing to do
+ .addTransition(RMContainerState.RESERVED, RMContainerState.RELEASED,
+ RMContainerEventType.RELEASED) // nothing to do
+
+
+ // Transitions from ALLOCATED state
+ .addTransition(RMContainerState.ALLOCATED, RMContainerState.ACQUIRED,
+ RMContainerEventType.ACQUIRED, new AcquiredTransition())
+ .addTransition(RMContainerState.ALLOCATED, RMContainerState.EXPIRED,
+ RMContainerEventType.EXPIRE, new FinishedTransition())
+ .addTransition(RMContainerState.ALLOCATED, RMContainerState.KILLED,
+ RMContainerEventType.KILL, new FinishedTransition())
+
+ // Transitions from ACQUIRED state
+ .addTransition(RMContainerState.ACQUIRED, RMContainerState.RUNNING,
+ RMContainerEventType.LAUNCHED, new LaunchedTransition())
+ .addTransition(RMContainerState.ACQUIRED, RMContainerState.COMPLETED,
+ RMContainerEventType.FINISHED, new ContainerFinishedAtAcquiredState())
+ .addTransition(RMContainerState.ACQUIRED, RMContainerState.RELEASED,
+ RMContainerEventType.RELEASED, new KillTransition())
+ .addTransition(RMContainerState.ACQUIRED, RMContainerState.EXPIRED,
+ RMContainerEventType.EXPIRE, new KillTransition())
+ .addTransition(RMContainerState.ACQUIRED, RMContainerState.KILLED,
+ RMContainerEventType.KILL, new KillTransition())
+
+ // Transitions from RUNNING state
+ .addTransition(RMContainerState.RUNNING, RMContainerState.COMPLETED,
+ RMContainerEventType.FINISHED, new ContainerCompletedTransition())
+ .addTransition(RMContainerState.RUNNING, RMContainerState.KILLED,
+ RMContainerEventType.KILL, new KillTransition())
+
+ // Transitions from COMPLETED state
+ .addTransition(RMContainerState.COMPLETED, RMContainerState.COMPLETED,
+ EnumSet.of(RMContainerEventType.RELEASED, RMContainerEventType.KILL))
+
+ // Transitions from EXPIRED state
+ .addTransition(RMContainerState.EXPIRED, RMContainerState.EXPIRED,
+ EnumSet.of(RMContainerEventType.RELEASED, RMContainerEventType.KILL))
+
+ // Transitions from RELEASED state
+ .addTransition(RMContainerState.RELEASED, RMContainerState.RELEASED,
+ EnumSet.of(RMContainerEventType.RELEASED, RMContainerEventType.KILL))
+
+ // Transitions from KILLED state
+ .addTransition(RMContainerState.KILLED, RMContainerState.KILLED,
+ EnumSet.of(RMContainerEventType.RELEASED, RMContainerEventType.KILL))
+
+ // create the topology tables
+ .installTopology();
+
+
+
+ private final StateMachine<RMContainerState, RMContainerEventType,
+ RMContainerEvent> stateMachine;
+ private final ReadLock readLock;
+ private final WriteLock writeLock;
+ private final ContainerId containerId;
+ private final ApplicationAttemptId appAttemptId;
+ private final NodeId nodeId;
+ private final Container container;
+ private final EventHandler eventHandler;
+ private final ContainerAllocationExpirer containerAllocationExpirer;
+
+ private Resource reservedResource;
+ private NodeId reservedNode;
+ private Priority reservedPriority;
+
+ public RMContainerImpl(Container container,
+ ApplicationAttemptId appAttemptId, NodeId nodeId,
+ EventHandler handler,
+ ContainerAllocationExpirer containerAllocationExpirer) {
+ this.stateMachine = stateMachineFactory.make(this);
+ this.containerId = container.getId();
+ this.nodeId = nodeId;
+ this.container = container;
+ this.appAttemptId = appAttemptId;
+ this.eventHandler = handler;
+ this.containerAllocationExpirer = containerAllocationExpirer;
+
+ ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+ this.readLock = lock.readLock();
+ this.writeLock = lock.writeLock();
+ }
+
+ @Override
+ public ContainerId getContainerId() {
+ return this.containerId;
+ }
+
+ @Override
+ public ApplicationAttemptId getApplicationAttemptId() {
+ return this.appAttemptId;
+ }
+
+ @Override
+ public Container getContainer() {
+ return this.container;
+ }
+
+ @Override
+ public RMContainerState getState() {
+ this.readLock.lock();
+
+ try {
+ return this.stateMachine.getCurrentState();
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public Resource getReservedResource() {
+ return reservedResource;
+ }
+
+ @Override
+ public NodeId getReservedNode() {
+ return reservedNode;
+ }
+
+ @Override
+ public Priority getReservedPriority() {
+ return reservedPriority;
+ }
+
+ @Override
+ public void handle(RMContainerEvent event) {
+ LOG.info("Processing " + event.getContainerId() + " of type " + event.getType());
+    writeLock.lock();
+    try {
+ RMContainerState oldState = getState();
+ try {
+ stateMachine.doTransition(event.getType(), event);
+ } catch (InvalidStateTransitonException e) {
+ LOG.error("Can't handle this event at current state", e);
+ LOG.error("Invalid event " + event.getType() +
+ " on container " + this.containerId);
+ }
+ if (oldState != getState()) {
+ LOG.info(event.getContainerId() + " Container Transitioned from "
+ + oldState + " to " + getState());
+ }
+    } finally {
+ writeLock.unlock();
+ }
+ }
+
+ private static class BaseTransition implements
+ SingleArcTransition<RMContainerImpl, RMContainerEvent> {
+
+ @Override
+ public void transition(RMContainerImpl cont, RMContainerEvent event) {
+
+ }
+ }
+
+ private static final class ContainerReservedTransition extends
+ BaseTransition {
+
+ @Override
+ public void transition(RMContainerImpl container, RMContainerEvent event) {
+      RMContainerReservedEvent e = (RMContainerReservedEvent) event;
+ container.reservedResource = e.getReservedResource();
+ container.reservedNode = e.getReservedNode();
+ container.reservedPriority = e.getReservedPriority();
+ }
+ }
+
+
+ private static final class ContainerStartedTransition extends
+ BaseTransition {
+
+ @Override
+ public void transition(RMContainerImpl container, RMContainerEvent event) {
+ container.eventHandler.handle(new RMAppAttemptContainerAllocatedEvent(
+ container.appAttemptId, container.container));
+ }
+ }
+
+ private static final class AcquiredTransition extends BaseTransition {
+
+ @Override
+ public void transition(RMContainerImpl container, RMContainerEvent event) {
+ // Register with containerAllocationExpirer.
+ container.containerAllocationExpirer.register(container.getContainerId());
+
+ // Tell the appAttempt
+ container.eventHandler.handle(new RMAppAttemptContainerAcquiredEvent(
+ container.getApplicationAttemptId(), container.getContainer()));
+ }
+ }
+
+ private static final class LaunchedTransition extends BaseTransition {
+
+ @Override
+ public void transition(RMContainerImpl container, RMContainerEvent event) {
+ // Unregister from containerAllocationExpirer.
+ container.containerAllocationExpirer.unregister(container
+ .getContainerId());
+ }
+ }
+
+ private static class FinishedTransition extends BaseTransition {
+
+ @Override
+ public void transition(RMContainerImpl container, RMContainerEvent event) {
+
+ // Inform AppAttempt
+ container.eventHandler.handle(new RMAppAttemptContainerFinishedEvent(
+ container.appAttemptId, container.container));
+ }
+ }
+
+ private static final class ContainerFinishedAtAcquiredState extends
+ FinishedTransition {
+ @Override
+ public void transition(RMContainerImpl container, RMContainerEvent event) {
+
+ // Unregister from containerAllocationExpirer.
+ container.containerAllocationExpirer.unregister(container
+ .getContainerId());
+
+ // Inform AppAttempt
+ super.transition(container, event);
+ }
+ }
+
+ private static final class KillTransition extends FinishedTransition {
+
+ @Override
+ public void transition(RMContainerImpl container, RMContainerEvent event) {
+
+ // Unregister from containerAllocationExpirer.
+ container.containerAllocationExpirer.unregister(container
+ .getContainerId());
+
+ // Inform node
+ container.eventHandler.handle(new RMNodeCleanContainerEvent(
+ container.nodeId, container.containerId));
+
+ // Inform appAttempt
+ super.transition(container, event);
+ }
+ }
+
+ private static final class ContainerCompletedTransition extends
+ FinishedTransition {
+
+ @Override
+ public void transition(RMContainerImpl container, RMContainerEvent event) {
+
+ RMContainerFinishedEvent finishedEvent = (RMContainerFinishedEvent) event;
+
+      // Update container-status for diagnostics. Today we completely
+      // replace it on finish; TODO: we may only need to update the
+      // diagnostics.
+ container.container.setContainerStatus(finishedEvent
+ .getRemoteContainerStatus());
+
+ // Inform appAttempt
+ super.transition(container, event);
+ }
+ }
+}
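
The common path through the table above is NEW -> ALLOCATED -> ACQUIRED -> RUNNING -> COMPLETED. A minimal sketch driving a single RMContainerImpl along it; the Container and ContainerStatus records and the expirer are taken as parameters, since constructing them (normally via RecordFactory) is outside this class:

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerStatus;
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.event.Event;
    import org.apache.hadoop.yarn.event.EventHandler;
    import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.*;

    public class ContainerLifecycleSketch {
      static void drive(Container container, ApplicationAttemptId attemptId,
          NodeId nodeId, ContainerAllocationExpirer expirer,
          ContainerStatus finalStatus) {
        // Swallow the RMAppAttempt/RMNode events the transitions emit.
        EventHandler sink = new EventHandler() {
          @Override
          public void handle(Event event) { /* no-op for the sketch */ }
        };

        RMContainer c = new RMContainerImpl(container, attemptId, nodeId,
            sink, expirer);

        c.handle(new RMContainerEvent(container.getId(),
            RMContainerEventType.START));    // NEW -> ALLOCATED
        c.handle(new RMContainerEvent(container.getId(),
            RMContainerEventType.ACQUIRED)); // ALLOCATED -> ACQUIRED
        c.handle(new RMContainerEvent(container.getId(),
            RMContainerEventType.LAUNCHED)); // ACQUIRED -> RUNNING
        c.handle(new RMContainerFinishedEvent(container.getId(),
            finalStatus));                   // RUNNING -> COMPLETED

        assert c.getState() == RMContainerState.COMPLETED;
      }
    }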
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerReservedEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerReservedEvent.java
new file mode 100644
index 0000000..30dabdf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerReservedEvent.java
@@ -0,0 +1,41 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * The event signifying that a container has been reserved.
+ *
+ * The event encapsulates information on the amount of reservation
+ * and the node on which the reservation is in effect.
+ */
+public class RMContainerReservedEvent extends RMContainerEvent {
+
+ private final Resource reservedResource;
+ private final NodeId reservedNode;
+ private final Priority reservedPriority;
+
+ public RMContainerReservedEvent(ContainerId containerId,
+ Resource reservedResource, NodeId reservedNode,
+ Priority reservedPriority) {
+ super(containerId, RMContainerEventType.RESERVED);
+ this.reservedResource = reservedResource;
+ this.reservedNode = reservedNode;
+ this.reservedPriority = reservedPriority;
+ }
+
+ public Resource getReservedResource() {
+ return reservedResource;
+ }
+
+ public NodeId getReservedNode() {
+ return reservedNode;
+ }
+
+ public Priority getReservedPriority() {
+ return reservedPriority;
+ }
+
+}
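
Reservation state reaches a container through the same handle() path as the other events. A small sketch (the caller is hypothetical; all argument values are supplied by it):

    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
    import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReservedEvent;

    public class ReservationSketch {
      // Moves a NEW (or already RESERVED) container to RESERVED and records
      // the reserved amount, node and priority on the container.
      static void reserve(RMContainer rmContainer, ContainerId id,
          Resource amount, NodeId node, Priority priority) {
        rmContainer.handle(
            new RMContainerReservedEvent(id, amount, node, priority));
      }
    }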
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerState.java
new file mode 100644
index 0000000..59e8caa
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerState.java
@@ -0,0 +1,13 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
+
+public enum RMContainerState {
+ NEW,
+ RESERVED,
+ ALLOCATED,
+ ACQUIRED,
+ RUNNING,
+ COMPLETED,
+ EXPIRED,
+ RELEASED,
+ KILLED
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
new file mode 100644
index 0000000..494dffc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+
+
+import java.util.List;
+
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+
+/**
+ * A node manager's information on available resources
+ * and other static information.
+ *
+ */
+public interface RMNode {
+
+ public static final String ANY = "*";
+
+ /**
+   * the node id of this node.
+ * @return the node id of this node.
+ */
+ public NodeId getNodeID();
+
+ /**
+ * the hostname of this node
+ * @return hostname of this node
+ */
+ public String getHostName();
+
+ /**
+ * the command port for this node
+ * @return command port for this node
+ */
+ public int getCommandPort();
+
+ /**
+ * the http port for this node
+ * @return http port for this node
+ */
+ public int getHttpPort();
+
+
+ /**
+ * the ContainerManager address for this node.
+ * @return the ContainerManager address for this node.
+ */
+ public String getNodeAddress();
+
+ /**
+ * the http-Address for this node.
+ * @return the http-url address for this node
+ */
+ public String getHttpAddress();
+
+ /**
+ * the health-status for this node
+ * @return the health-status for this node.
+ */
+ public NodeHealthStatus getNodeHealthStatus();
+
+ /**
+ * the total available resource.
+ * @return the total available resource.
+ */
+ public org.apache.hadoop.yarn.api.records.Resource getTotalCapability();
+
+ /**
+ * The rack name for this node manager.
+ * @return the rack name.
+ */
+ public String getRackName();
+
+ /**
+ * the {@link Node} information for this node.
+ * @return {@link Node} information for this node.
+ */
+ public Node getNode();
+
+ public RMNodeState getState();
+
+ public List<ContainerId> pullContainersToCleanUp();
+
+ public List<ApplicationId> pullAppsToCleanup();
+
+ public HeartbeatResponse getLastHeartBeatResponse();
+}
\ No newline at end of file
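
The pull-style methods at the end of the interface let heartbeat processing drain pending cleanup work. A sketch of a hypothetical consumer; the pull* methods are assumed to clear their lists as they return, matching the pull semantics used elsewhere in this patch:

    import java.util.List;

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;

    public class HeartbeatSketch {
      static void onHeartbeat(RMNode node) {
        // Drain cleanup work accumulated since the last heartbeat response.
        List<ContainerId> containersToClean = node.pullContainersToCleanUp();
        List<ApplicationId> appsToClean = node.pullAppsToCleanup();
        // Both lists would be embedded in the HeartbeatResponse sent back
        // to the node manager.
      }
    }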
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeCleanAppEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeCleanAppEvent.java
new file mode 100644
index 0000000..18f9e49
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeCleanAppEvent.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+
+public class RMNodeCleanAppEvent extends RMNodeEvent {
+
+ private ApplicationId appId;
+
+ public RMNodeCleanAppEvent(NodeId nodeId, ApplicationId appId) {
+ super(nodeId, RMNodeEventType.CLEANUP_APP);
+ this.appId = appId;
+ }
+
+ public ApplicationId getAppId() {
+ return this.appId;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeCleanContainerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeCleanContainerEvent.java
new file mode 100644
index 0000000..50fdaa8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeCleanContainerEvent.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+
+public class RMNodeCleanContainerEvent extends RMNodeEvent {
+
+ private ContainerId contId;
+
+ public RMNodeCleanContainerEvent(NodeId nodeId, ContainerId contId) {
+ super(nodeId, RMNodeEventType.CLEANUP_CONTAINER);
+ this.contId = contId;
+ }
+
+ public ContainerId getContainerId() {
+ return this.contId;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEvent.java
new file mode 100644
index 0000000..7d9c6df
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEvent.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class RMNodeEvent extends AbstractEvent<RMNodeEventType> {
+
+ private final NodeId nodeId;
+
+ public RMNodeEvent(NodeId nodeId, RMNodeEventType type) {
+ super(type);
+ this.nodeId = nodeId;
+ }
+
+ public NodeId getNodeId() {
+ return this.nodeId;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
new file mode 100644
index 0000000..41b3e83
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
@@ -0,0 +1,20 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+
+public enum RMNodeEventType {
+ // Source: AdminService
+ DECOMMISSION,
+
+  // Source: ResourceTrackerService
+ STATUS_UPDATE,
+ REBOOTING,
+
+ // Source: Application
+ CLEANUP_APP,
+
+ // Source: Container
+ CONTAINER_ALLOCATED,
+ CLEANUP_CONTAINER,
+
+ // Source: NMLivelinessMonitor
+ EXPIRE
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
new file mode 100644
index 0000000..ceb2843
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -0,0 +1,420 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
+import org.apache.hadoop.yarn.state.SingleArcTransition;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.BuilderUtils.ContainerIdComparator;
+
+/**
+ * This class is used to keep track of all the applications/containers
+ * running on a node.
+ *
+ */
+@Private
+@Unstable
+public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
+
+ private static final Log LOG = LogFactory.getLog(RMNodeImpl.class);
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ private final ReadLock readLock;
+ private final WriteLock writeLock;
+
+ private final NodeId nodeId;
+ private final RMContext context;
+ private final String hostName;
+ private final int commandPort;
+ private final int httpPort;
+ private final String nodeAddress; // The containerManager address
+ private final String httpAddress;
+ private final Resource totalCapability;
+ private final Node node;
+ private final NodeHealthStatus nodeHealthStatus = recordFactory
+ .newRecordInstance(NodeHealthStatus.class);
+
+  /* map of containers that have just been launched, keyed by container id */
+ private final Map<ContainerId, Container> justLaunchedContainers =
+ new HashMap<ContainerId, Container>();
+
+ /* set of containers that need to be cleaned */
+ private final Set<ContainerId> containersToClean = new TreeSet<ContainerId>(
+ new ContainerIdComparator());
+
+ /* the list of applications that have finished and need to be purged */
+ private final List<ApplicationId> finishedApplications = new ArrayList<ApplicationId>();
+
+ private HeartbeatResponse latestHeartBeatResponse = recordFactory
+ .newRecordInstance(HeartbeatResponse.class);
+
+ private static final StateMachineFactory<RMNodeImpl,
+ RMNodeState,
+ RMNodeEventType,
+ RMNodeEvent> stateMachineFactory
+ = new StateMachineFactory<RMNodeImpl,
+ RMNodeState,
+ RMNodeEventType,
+ RMNodeEvent>(RMNodeState.RUNNING)
+
+  // Transitions from RUNNING state
+ .addTransition(RMNodeState.RUNNING,
+ EnumSet.of(RMNodeState.RUNNING, RMNodeState.UNHEALTHY),
+ RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenHealthyTransition())
+ .addTransition(RMNodeState.RUNNING, RMNodeState.DECOMMISSIONED,
+ RMNodeEventType.DECOMMISSION, new RemoveNodeTransition())
+ .addTransition(RMNodeState.RUNNING, RMNodeState.LOST,
+ RMNodeEventType.EXPIRE, new RemoveNodeTransition())
+ .addTransition(RMNodeState.RUNNING, RMNodeState.LOST,
+ RMNodeEventType.REBOOTING, new RemoveNodeTransition())
+ .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING,
+ RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition())
+ .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING,
+ RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition())
+
+  // Transitions from UNHEALTHY state
+ .addTransition(RMNodeState.UNHEALTHY,
+ EnumSet.of(RMNodeState.UNHEALTHY, RMNodeState.RUNNING),
+ RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenUnHealthyTransition())
+
+ // create the topology tables
+ .installTopology();
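+
+  // Illustrative note on the table above: an event drives a transition on a
+  // concrete node, e.g. rmNodeImpl.handle(new RMNodeEvent(nodeId,
+  // RMNodeEventType.EXPIRE)) moves a RUNNING node to LOST via
+  // RemoveNodeTransition, while a STATUS_UPDATE keeps it RUNNING or moves it
+  // to UNHEALTHY depending on the reported node health.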
+
+ private final StateMachine<RMNodeState, RMNodeEventType,
+ RMNodeEvent> stateMachine;
+
+ public RMNodeImpl(NodeId nodeId, RMContext context, String hostName,
+ int cmPort, int httpPort, Node node, Resource capability) {
+ this.nodeId = nodeId;
+ this.context = context;
+ this.hostName = hostName;
+ this.commandPort = cmPort;
+ this.httpPort = httpPort;
+ this.totalCapability = capability;
+ this.nodeAddress = hostName + ":" + cmPort;
+    this.httpAddress = hostName + ":" + httpPort;
+ this.node = node;
+ this.nodeHealthStatus.setIsNodeHealthy(true);
+ this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis());
+
+ this.latestHeartBeatResponse.setResponseId(0);
+
+ ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+ this.readLock = lock.readLock();
+ this.writeLock = lock.writeLock();
+
+ this.stateMachine = stateMachineFactory.make(this);
+
+ context.getDispatcher().getEventHandler().handle(
+ new NodeAddedSchedulerEvent(this));
+ }
+
+ @Override
+ public String toString() {
+ return this.nodeId.toString();
+ }
+
+ @Override
+ public String getHostName() {
+ return hostName;
+ }
+
+ @Override
+ public int getCommandPort() {
+ return commandPort;
+ }
+
+ @Override
+ public int getHttpPort() {
+ return httpPort;
+ }
+
+ @Override
+ public NodeId getNodeID() {
+ return this.nodeId;
+ }
+
+ @Override
+ public String getNodeAddress() {
+ return this.nodeAddress;
+ }
+
+ @Override
+ public String getHttpAddress() {
+ return this.httpAddress;
+ }
+
+ @Override
+ public Resource getTotalCapability() {
+ return this.totalCapability;
+ }
+
+ @Override
+ public String getRackName() {
+ return node.getNetworkLocation();
+ }
+
+ @Override
+ public Node getNode() {
+ return this.node;
+ }
+
+ @Override
+ public NodeHealthStatus getNodeHealthStatus() {
+ this.readLock.lock();
+
+ try {
+ return this.nodeHealthStatus;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public RMNodeState getState() {
+ this.readLock.lock();
+
+ try {
+ return this.stateMachine.getCurrentState();
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public List<ApplicationId> pullAppsToCleanup() {
+ this.writeLock.lock();
+
+ try {
+      List<ApplicationId> lastFinishedApplications = new ArrayList<ApplicationId>();
+      lastFinishedApplications.addAll(this.finishedApplications);
+      this.finishedApplications.clear();
+      return lastFinishedApplications;
+ } finally {
+ this.writeLock.unlock();
+ }
+
+ }
+
+ @Override
+ public List<ContainerId> pullContainersToCleanUp() {
+
+ this.writeLock.lock();
+
+ try {
+ List<ContainerId> containersToCleanUp = new ArrayList<ContainerId>();
+ containersToCleanUp.addAll(this.containersToClean);
+ this.containersToClean.clear();
+ return containersToCleanUp;
+ } finally {
+ this.writeLock.unlock();
+ }
+  }
+
+ @Override
+ public HeartbeatResponse getLastHeartBeatResponse() {
+
+ this.writeLock.lock();
+
+ try {
+ return this.latestHeartBeatResponse;
+ } finally {
+ this.writeLock.unlock();
+ }
+ }
+
+  @Override
+  public void handle(RMNodeEvent event) {
+ LOG.info("Processing " + event.getNodeId() + " of type " + event.getType());
+    writeLock.lock();
+    try {
+ RMNodeState oldState = getState();
+ try {
+ stateMachine.doTransition(event.getType(), event);
+ } catch (InvalidStateTransitonException e) {
+ LOG.error("Can't handle this event at current state", e);
+ LOG.error("Invalid event " + event.getType() +
+ " on Node " + this.nodeId);
+ }
+ if (oldState != getState()) {
+ LOG.info(nodeId + " Node Transitioned from " + oldState + " to "
+ + getState());
+ }
+    } finally {
+ writeLock.unlock();
+ }
+ }
+
+ public static class CleanUpAppTransition
+ implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
+
+ @Override
+ public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
+      rmNode.finishedApplications.add(
+          ((RMNodeCleanAppEvent) event).getAppId());
+ }
+ }
+
+ public static class CleanUpContainerTransition implements
+ SingleArcTransition<RMNodeImpl, RMNodeEvent> {
+
+ @Override
+ public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
+
+      rmNode.containersToClean.add(
+          ((RMNodeCleanContainerEvent) event).getContainerId());
+ }
+ }
+
+ public static class RemoveNodeTransition
+ implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
+
+ @Override
+ public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
+ // Inform the scheduler
+ rmNode.context.getDispatcher().getEventHandler().handle(
+ new NodeRemovedSchedulerEvent(rmNode));
+
+ // Remove the node from the system.
+ rmNode.context.getRMNodes().remove(rmNode.nodeId);
+ LOG.info("Removed Node " + rmNode.nodeId);
+
+ }
+ }
+
+ public static class StatusUpdateWhenHealthyTransition implements
+ MultipleArcTransition<RMNodeImpl, RMNodeEvent, RMNodeState> {
+ @Override
+ public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
+
+ RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
+
+      // Record the latest heartbeat response.
+ rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse();
+
+ if (!statusEvent.getNodeHealthStatus().getIsNodeHealthy()) {
+ // Inform the scheduler
+ rmNode.context.getDispatcher().getEventHandler().handle(
+ new NodeRemovedSchedulerEvent(rmNode));
+ return RMNodeState.UNHEALTHY;
+ }
+
+ // Filter the map to only obtain just launched containers and finished
+ // containers.
+ Map<ApplicationId, List<Container>> remoteAppContainersMap = statusEvent
+ .getContainersCollection();
+ Map<ApplicationId, List<Container>> containersMapForScheduler = new HashMap<ApplicationId, List<Container>>(
+ remoteAppContainersMap.size());
+ for (Entry<ApplicationId, List<Container>> entrySet : remoteAppContainersMap
+ .entrySet()) {
+
+ ApplicationId appId = entrySet.getKey();
+ List<Container> remoteContainerList = entrySet.getValue();
+
+ if (!containersMapForScheduler.containsKey(appId)) {
+ containersMapForScheduler.put(appId, new ArrayList<Container>(
+ remoteContainerList.size()));
+ }
+ List<Container> entryForThisApp = containersMapForScheduler
+ .get(appId);
+
+ for (Container remoteContainer : remoteContainerList) {
+
+ // Process running containers
+ ContainerId containerId = remoteContainer.getId();
+ if (remoteContainer.getState() == ContainerState.RUNNING) {
+ if (!rmNode.justLaunchedContainers.containsKey(containerId)) {
+ // Just launched container. RM knows about it the first time.
+ rmNode.justLaunchedContainers.put(containerId, remoteContainer);
+ entryForThisApp.add(remoteContainer);
+ }
+ } else {
+ // A finished container
+ rmNode.justLaunchedContainers.remove(containerId);
+ entryForThisApp.add(remoteContainer);
+ }
+ }
+ }
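+
+      // Summary of the loop above: entryForThisApp now contains only
+      // newly-launched containers (seen RUNNING for the first time) and
+      // containers that have finished; containers already tracked in
+      // justLaunchedContainers and still RUNNING are skipped so the
+      // scheduler is not re-notified about them.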
+
+ rmNode.context.getDispatcher().getEventHandler().handle(
+ new NodeUpdateSchedulerEvent(rmNode, containersMapForScheduler));
+
+ return RMNodeState.RUNNING;
+ }
+ }
+
+ public static class StatusUpdateWhenUnHealthyTransition
+ implements
+ MultipleArcTransition<RMNodeImpl, RMNodeEvent, RMNodeState> {
+
+ @Override
+ public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
+ RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
+
+      // Record the latest heartbeat response.
+ rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse();
+
+ if (statusEvent.getNodeHealthStatus().getIsNodeHealthy()) {
+ rmNode.context.getDispatcher().getEventHandler().handle(
+ new NodeAddedSchedulerEvent(rmNode));
+ return RMNodeState.RUNNING;
+ }
+
+ return RMNodeState.UNHEALTHY;
+ }
+ }
+ }
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java
new file mode 100644
index 0000000..387252d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java
@@ -0,0 +1,5 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+
+public enum RMNodeState {
+ RUNNING, UNHEALTHY, DECOMMISSIONED, LOST
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java
new file mode 100644
index 0000000..fc7bafd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java
@@ -0,0 +1,38 @@
+package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+
+public class RMNodeStatusEvent extends RMNodeEvent {
+
+ private final NodeHealthStatus nodeHealthStatus;
+  private final Map<ApplicationId, List<Container>> containersCollection;
+ private final HeartbeatResponse latestResponse;
+
+ public RMNodeStatusEvent(NodeId nodeId, NodeHealthStatus nodeHealthStatus,
+ Map<ApplicationId, List<Container>> collection,
+ HeartbeatResponse latestResponse) {
+ super(nodeId, RMNodeEventType.STATUS_UPDATE);
+ this.nodeHealthStatus = nodeHealthStatus;
+ this.containersCollection = collection;
+ this.latestResponse = latestResponse;
+ }
+
+ public NodeHealthStatus getNodeHealthStatus() {
+ return this.nodeHealthStatus;
+ }
+
+ public Map<ApplicationId, List<Container>> getContainersCollection() {
+ return this.containersCollection;
+ }
+
+ public HeartbeatResponse getLatestResponse() {
+ return this.latestResponse;
+ }
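+
+  // Illustrative sketch (sender assumed for illustration): the service that
+  // processes node heartbeats would build and dispatch one of these events
+  // per heartbeat, e.g.:
+  //
+  //   dispatcher.getEventHandler().handle(
+  //       new RMNodeStatusEvent(nodeId, healthStatus, appContainersMap,
+  //           latestResponse));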
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Allocation.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Allocation.java
new file mode 100644
index 0000000..cfef9be
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Allocation.java
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+public class Allocation {
+ final List<Container> containers;
+ final Resource resourceLimit;
+
+ public Allocation(List<Container> containers, Resource resourceLimit) {
+ this.containers = containers;
+ this.resourceLimit = resourceLimit;
+ }
+
+ public List<Container> getContainers() {
+ return containers;
+ }
+
+ public Resource getResourceLimit() {
+ return resourceLimit;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
new file mode 100644
index 0000000..6dae436
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -0,0 +1,333 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
+/**
+ * This class keeps track of all the resource consumption of an application.
+ * It also tracks the application's currently running and completed containers.
+ */
+@Private
+@Unstable
+public class AppSchedulingInfo {
+
+ private static final Log LOG = LogFactory.getLog(AppSchedulingInfo.class);
+ private final ApplicationAttemptId applicationAttemptId;
+ final ApplicationId applicationId;
+ private final String queueName;
+ Queue queue;
+ final String user;
+ private final AtomicInteger containerIdCounter = new AtomicInteger(0);
+
+ private final RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ final Set<Priority> priorities = new TreeSet<Priority>(
+ new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator());
+ final Map<Priority, Map<String, ResourceRequest>> requests =
+ new HashMap<Priority, Map<String, ResourceRequest>>();
+
+ private final ApplicationStore store;
+
+ /* Allocated by scheduler */
+ boolean pending = true; // for app metrics
+
+ public AppSchedulingInfo(ApplicationAttemptId appAttemptId,
+ String user, Queue queue, ApplicationStore store) {
+ this.applicationAttemptId = appAttemptId;
+ this.applicationId = appAttemptId.getApplicationId();
+ this.queue = queue;
+ this.queueName = queue.getQueueName();
+ this.user = user;
+ this.store = store;
+ }
+
+ public ApplicationId getApplicationId() {
+ return applicationId;
+ }
+
+ public ApplicationAttemptId getApplicationAttemptId() {
+ return applicationAttemptId;
+ }
+
+ public String getQueueName() {
+ return queueName;
+ }
+
+ public String getUser() {
+ return user;
+ }
+
+ public synchronized boolean isPending() {
+ return pending;
+ }
+
+ /**
+ * Clear any pending requests from this application.
+ */
+ private synchronized void clearRequests() {
+ priorities.clear();
+ requests.clear();
+ LOG.info("Application " + applicationId + " requests cleared");
+ }
+
+ public int getNewContainerId() {
+ return this.containerIdCounter.incrementAndGet();
+ }
+
+ /**
+   * The ApplicationMaster is updating resource requirements for the
+   * application by asking for more resources and releasing resources
+   * it has already acquired.
+ *
+ * @param requests
+ * resources to be acquired
+ */
+ synchronized public void updateResourceRequests(List<ResourceRequest> requests) {
+ QueueMetrics metrics = queue.getMetrics();
+ // Update resource requests
+ for (ResourceRequest request : requests) {
+ Priority priority = request.getPriority();
+ String hostName = request.getHostName();
+ boolean updatePendingResources = false;
+ ResourceRequest lastRequest = null;
+
+ if (hostName.equals(RMNode.ANY)) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("update:" + " application=" + applicationId + " request="
+ + request);
+ }
+ updatePendingResources = true;
+ }
+
+ Map<String, ResourceRequest> asks = this.requests.get(priority);
+
+ if (asks == null) {
+ asks = new HashMap<String, ResourceRequest>();
+ this.requests.put(priority, asks);
+ this.priorities.add(priority);
+ } else if (updatePendingResources) {
+ lastRequest = asks.get(hostName);
+ }
+
+ asks.put(hostName, request);
+ if (updatePendingResources) {
+ int lastRequestContainers = lastRequest != null ? lastRequest
+ .getNumContainers() : 0;
+ Resource lastRequestCapability = lastRequest != null ? lastRequest
+ .getCapability() : Resources.none();
+ metrics.incrPendingResources(user, request.getNumContainers()
+ - lastRequestContainers, Resources.subtractFrom( // save a clone
+ Resources.multiply(request.getCapability(), request
+ .getNumContainers()), Resources.multiply(lastRequestCapability,
+ lastRequestContainers)));
+ }
+ }
+ }
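+
+  // Worked example with assumed numbers: if the previous ANY request at this
+  // priority asked for 3 containers of 1024 MB and the new request asks for
+  // 5 containers of 1024 MB, the pending metrics are incremented by
+  // (5 - 3) = 2 containers and (5 - 3) * 1024 MB of memory, i.e. the delta
+  // between the new request and the last one.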
+
+ synchronized public Collection<Priority> getPriorities() {
+ return priorities;
+ }
+
+ synchronized public Map<String, ResourceRequest> getResourceRequests(
+ Priority priority) {
+ return requests.get(priority);
+ }
+
+ synchronized public ResourceRequest getResourceRequest(Priority priority,
+ String nodeAddress) {
+ Map<String, ResourceRequest> nodeRequests = requests.get(priority);
+ return (nodeRequests == null) ? null : nodeRequests.get(nodeAddress);
+ }
+
+ public synchronized Resource getResource(Priority priority) {
+ ResourceRequest request = getResourceRequest(priority, RMNode.ANY);
+ return request.getCapability();
+ }
+
+ /**
+ * Resources have been allocated to this application by the resource
+ * scheduler. Track them.
+ *
+ * @param type
+ * the type of the node
+ * @param node
+ * the nodeinfo of the node
+ * @param priority
+ * the priority of the request.
+ * @param request
+ * the request
+   * @param container
+   *          the container allocated.
+ */
+ synchronized public void allocate(NodeType type, SchedulerNode node,
+ Priority priority, ResourceRequest request, Container container) {
+ if (type == NodeType.NODE_LOCAL) {
+ allocateNodeLocal(node, priority, request, container);
+ } else if (type == NodeType.RACK_LOCAL) {
+ allocateRackLocal(node, priority, request, container);
+ } else {
+ allocateOffSwitch(node, priority, request, container);
+ }
+ QueueMetrics metrics = queue.getMetrics();
+ if (pending) {
+ // once an allocation is done we assume the application is
+ // running from scheduler's POV.
+ pending = false;
+ metrics.incrAppsRunning(user);
+ }
+ LOG.debug("allocate: user: " + user + ", memory: "
+ + request.getCapability());
+ metrics.allocateResources(user, 1, request.getCapability());
+ }
+
+  /**
+   * The {@link ResourceScheduler} is allocating node-local resources to the
+   * application.
+   */
+ synchronized private void allocateNodeLocal(SchedulerNode node, Priority priority,
+ ResourceRequest nodeLocalRequest, Container container) {
+ // Update consumption and track allocations
+ allocate(container);
+
+ // Update future requirements
+ nodeLocalRequest.setNumContainers(nodeLocalRequest.getNumContainers() - 1);
+ if (nodeLocalRequest.getNumContainers() == 0) {
+ this.requests.get(priority).remove(node.getHostName());
+ }
+
+ ResourceRequest rackLocalRequest = requests.get(priority).get(
+ node.getRackName());
+ rackLocalRequest.setNumContainers(rackLocalRequest.getNumContainers() - 1);
+ if (rackLocalRequest.getNumContainers() == 0) {
+ this.requests.get(priority).remove(node.getRackName());
+ }
+
+ // Do not remove ANY
+ ResourceRequest offSwitchRequest = requests.get(priority).get(
+ RMNode.ANY);
+ offSwitchRequest.setNumContainers(offSwitchRequest.getNumContainers() - 1);
+ }
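+
+  // Worked example with assumed numbers: if the outstanding requests at this
+  // priority are {host1: 2, rack1: 3, ANY: 4}, a node-local allocation on
+  // host1 leaves {host1: 1, rack1: 2, ANY: 3}; the host and rack entries are
+  // removed once they drop to zero, while the ANY entry is never removed.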
+
+  /**
+   * The {@link ResourceScheduler} is allocating rack-local resources to the
+   * application.
+   */
+ synchronized private void allocateRackLocal(SchedulerNode node, Priority priority,
+ ResourceRequest rackLocalRequest, Container container) {
+
+ // Update consumption and track allocations
+ allocate(container);
+
+ // Update future requirements
+ rackLocalRequest.setNumContainers(rackLocalRequest.getNumContainers() - 1);
+ if (rackLocalRequest.getNumContainers() == 0) {
+ this.requests.get(priority).remove(node.getRackName());
+ }
+
+ // Do not remove ANY
+ ResourceRequest offSwitchRequest = requests.get(priority).get(
+ RMNode.ANY);
+ offSwitchRequest.setNumContainers(offSwitchRequest.getNumContainers() - 1);
+ }
+
+  /**
+   * The {@link ResourceScheduler} is allocating off-switch resources to the
+   * application.
+   */
+ synchronized private void allocateOffSwitch(SchedulerNode node, Priority priority,
+ ResourceRequest offSwitchRequest, Container container) {
+
+ // Update consumption and track allocations
+ allocate(container);
+
+ // Update future requirements
+
+ // Do not remove ANY
+ offSwitchRequest.setNumContainers(offSwitchRequest.getNumContainers() - 1);
+ }
+
+ synchronized private void allocate(Container container) {
+ // Update consumption and track allocations
+ //TODO: fixme sharad
+ /* try {
+ store.storeContainer(container);
+ } catch (IOException ie) {
+ // TODO fix this. we shouldnt ignore
+ }*/
+
+ LOG.debug("allocate: applicationId=" + applicationId + " container="
+ + container.getId() + " host="
+ + container.getNodeId().toString());
+ }
+
+ synchronized public void stop(RMAppAttemptState rmAppAttemptFinalState) {
+ // clear pending resources metrics for the application
+ QueueMetrics metrics = queue.getMetrics();
+ for (Map<String, ResourceRequest> asks : requests.values()) {
+ ResourceRequest request = asks.get(RMNode.ANY);
+ if (request != null) {
+ metrics.decrPendingResources(user, request.getNumContainers(),
+ Resources.multiply(request.getCapability(), request
+ .getNumContainers()));
+ }
+ }
+ metrics.finishApp(this, rmAppAttemptFinalState);
+
+ // Clear requests themselves
+ clearRequests();
+ }
+
+ public synchronized void setQueue(Queue queue) {
+ this.queue = queue;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeReport.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeReport.java
new file mode 100644
index 0000000..e3de9ea
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeReport.java
@@ -0,0 +1,28 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * Node usage report.
+ */
+@Private
+@Stable
+public class NodeReport {
+ private final Resource usedResources;
+ private final int numContainers;
+
+ public NodeReport(Resource used, int numContainers) {
+ this.usedResources = used;
+ this.numContainers = numContainers;
+ }
+
+ public Resource getUsedResources() {
+ return usedResources;
+ }
+
+ public int getNumContainers() {
+ return numContainers;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeResponse.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeResponse.java
new file mode 100644
index 0000000..a06dfb9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeResponse.java
@@ -0,0 +1,33 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+
+
+/**
+ * Encapsulates the cluster's response to status updates
+ * from the node managers.
+ */
+public class NodeResponse {
+ private final List<Container> completed;
+ private final List<Container> toCleanUp;
+ private final List<ApplicationId> finishedApplications;
+
+ public NodeResponse(List<ApplicationId> finishedApplications,
+ List<Container> completed, List<Container> toKill) {
+ this.finishedApplications = finishedApplications;
+ this.completed = completed;
+ this.toCleanUp = toKill;
+ }
+ public List<ApplicationId> getFinishedApplications() {
+ return this.finishedApplications;
+ }
+ public List<Container> getCompletedContainers() {
+ return this.completed;
+ }
+ public List<Container> getContainersToCleanUp() {
+ return this.toCleanUp;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
new file mode 100644
index 0000000..821ec24
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
@@ -0,0 +1,28 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+/**
+ * Resource classification.
+ */
+public enum NodeType {
+ NODE_LOCAL,
+ RACK_LOCAL,
+ OFF_SWITCH
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
new file mode 100644
index 0000000..2bda03d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+
+@Evolving
+@LimitedPrivate("yarn")
+public interface Queue {
+ /**
+ * Get the queue name
+ * @return queue name
+ */
+ String getQueueName();
+
+ /**
+ * Get the queue metrics
+ * @return the queue metrics
+ */
+ QueueMetrics getMetrics();
+
+ /**
+ * Get ACLs for the queue.
+ * @return ACLs for the queue
+ */
+ public Map<QueueACL, AccessControlList> getQueueAcls();
+
+ /**
+ * Get queue information
+ * @param includeChildQueues include child queues?
+ * @param recursive recursively get child queue information?
+ * @return queue information
+ */
+ QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive);
+
+ /**
+ * Get queue ACLs for given <code>user</code>.
+ * @param user username
+ * @return queue ACLs for user
+ */
+ List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
new file mode 100644
index 0000000..d12ee36
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -0,0 +1,267 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import com.google.common.base.Splitter;
+import java.util.Map;
+import java.util.HashMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import static org.apache.hadoop.yarn.server.resourcemanager.resource.Resources.*;
+
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
+
+@InterfaceAudience.Private
+@Metrics(context="yarn")
+public class QueueMetrics {
+ @Metric("# of apps submitted") MutableCounterInt appsSubmitted;
+ @Metric("# of running apps") MutableGaugeInt appsRunning;
+ @Metric("# of pending apps") MutableGaugeInt appsPending;
+ @Metric("# of apps completed") MutableCounterInt appsCompleted;
+ @Metric("# of apps killed") MutableCounterInt appsKilled;
+ @Metric("# of apps failed") MutableCounterInt appsFailed;
+
+ @Metric("Allocated memory in GiB") MutableGaugeInt allocatedGB;
+ @Metric("# of allocated containers") MutableGaugeInt allocatedContainers;
+ @Metric("Available memory in GiB") MutableGaugeInt availableGB;
+ @Metric("Pending memory allocation in GiB") MutableGaugeInt pendingGB;
+ @Metric("# of pending containers") MutableGaugeInt pendingContainers;
+ @Metric("# of reserved memory in GiB") MutableGaugeInt reservedGB;
+ @Metric("# of reserved containers") MutableGaugeInt reservedContainers;
+
+ static final Logger LOG = LoggerFactory.getLogger(QueueMetrics.class);
+ static final int GB = 1024; // resource.memory is in MB
+ static final MetricsInfo RECORD_INFO = info("QueueMetrics",
+ "Metrics for the resource scheduler");
+ static final MetricsInfo QUEUE_INFO = info("Queue", "Metrics by queue");
+ static final MetricsInfo USER_INFO = info("User", "Metrics by user");
+ static final Splitter Q_SPLITTER =
+ Splitter.on('.').omitEmptyStrings().trimResults();
+
+ final MetricsRegistry registry;
+ final String queueName;
+ final QueueMetrics parent;
+ final MetricsSystem metricsSystem;
+ private final Map<String, QueueMetrics> users;
+
+ QueueMetrics(MetricsSystem ms, String queueName, Queue parent, boolean enableUserMetrics) {
+ registry = new MetricsRegistry(RECORD_INFO);
+ this.queueName = queueName;
+ this.parent = parent != null ? parent.getMetrics() : null;
+ this.users = enableUserMetrics ? new HashMap<String, QueueMetrics>()
+ : null;
+ metricsSystem = ms;
+ }
+
+ QueueMetrics tag(MetricsInfo info, String value) {
+ registry.tag(info, value);
+ return this;
+ }
+
+ static StringBuilder sourceName(String queueName) {
+ StringBuilder sb = new StringBuilder(RECORD_INFO.name());
+ int i = 0;
+ for (String node : Q_SPLITTER.split(queueName)) {
+ sb.append(",q").append(i++).append('=').append(node);
+ }
+ return sb;
+ }
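+
+  // For example, sourceName("root.default") produces the source name
+  // "QueueMetrics,q0=root,q1=default".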
+
+  public static synchronized QueueMetrics forQueue(String queueName,
+      Queue parent, boolean enableUserMetrics) {
+ return forQueue(DefaultMetricsSystem.instance(), queueName, parent,
+ enableUserMetrics);
+ }
+
+ public static QueueMetrics forQueue(MetricsSystem ms, String queueName,
+ Queue parent, boolean enableUserMetrics) {
+ QueueMetrics metrics = new QueueMetrics(ms, queueName, parent,
+ enableUserMetrics).tag(QUEUE_INFO, queueName);
+ return ms == null ? metrics : ms.register(sourceName(queueName).toString(),
+ "Metrics for queue: " + queueName, metrics);
+ }
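+
+  // Minimal usage sketch (queue name and parent assumed for illustration):
+  //
+  //   QueueMetrics metrics =
+  //       QueueMetrics.forQueue("root.default", parentQueue, true);
+  //   metrics.submitApp("alice");
+  //
+  // registers a source named "QueueMetrics,q0=root,q1=default" with the
+  // default metrics system, then bumps appsSubmitted/appsPending for the
+  // queue, the user "alice", and each ancestor queue.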
+
+ synchronized QueueMetrics getUserMetrics(String userName) {
+ if (users == null) {
+ return null;
+ }
+ QueueMetrics metrics = users.get(userName);
+ if (metrics == null) {
+ metrics = new QueueMetrics(metricsSystem, queueName, null, false);
+ users.put(userName, metrics);
+ metricsSystem.register(
+ sourceName(queueName).append(",user=").append(userName).toString(),
+ "Metrics for user '"+ userName +"' in queue '"+ queueName +"'",
+ metrics.tag(QUEUE_INFO, queueName).tag(USER_INFO, userName));
+ }
+ return metrics;
+ }
+
+ public void submitApp(String user) {
+ appsSubmitted.incr();
+ appsPending.incr();
+ QueueMetrics userMetrics = getUserMetrics(user);
+ if (userMetrics != null) {
+ userMetrics.submitApp(user);
+ }
+ if (parent != null) {
+ parent.submitApp(user);
+ }
+ }
+
+ public void incrAppsRunning(String user) {
+ appsRunning.incr();
+ appsPending.decr();
+ QueueMetrics userMetrics = getUserMetrics(user);
+ if (userMetrics != null) {
+ userMetrics.incrAppsRunning(user);
+ }
+ if (parent != null) {
+ parent.incrAppsRunning(user);
+ }
+ }
+
+ public void finishApp(AppSchedulingInfo app,
+ RMAppAttemptState rmAppAttemptFinalState) {
+ switch (rmAppAttemptFinalState) {
+ case KILLED: appsKilled.incr(); break;
+ case FAILED: appsFailed.incr(); break;
+ default: appsCompleted.incr(); break;
+ }
+ if (app.isPending()) {
+ appsPending.decr();
+ } else {
+ appsRunning.decr();
+ }
+ QueueMetrics userMetrics = getUserMetrics(app.getUser());
+ if (userMetrics != null) {
+ userMetrics.finishApp(app, rmAppAttemptFinalState);
+ }
+ if (parent != null) {
+ parent.finishApp(app, rmAppAttemptFinalState);
+ }
+ }
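+
+  // Note on rollup: each mutator above updates this queue's own counters
+  // first, then the per-user QueueMetrics (when user metrics are enabled),
+  // and finally recurses into the parent, so one application or container
+  // event is reflected at every level of the queue hierarchy.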
+
+ /**
+ * Set available resources. To be called by scheduler periodically as
+ * resources become available.
+ * @param limit resource limit
+ */
+ public void setAvailableResourcesToQueue(Resource limit) {
+ availableGB.set(limit.getMemory()/GB);
+ }
+
+ /**
+ * Set available resources. To be called by scheduler periodically as
+ * resources become available.
+ * @param user
+ * @param limit resource limit
+ */
+ public void setAvailableResourcesToUser(String user, Resource limit) {
+ QueueMetrics userMetrics = getUserMetrics(user);
+ if (userMetrics != null) {
+ userMetrics.setAvailableResourcesToQueue(limit);
+ }
+ }
+
+ /**
+   * Increment pending resource metrics.
+   * @param user the user
+   * @param containers the number of containers
+   * @param res the TOTAL delta of resources; note this is different from
+   *            the other APIs, which use per-container resources
+ */
+ public void incrPendingResources(String user, int containers, Resource res) {
+ _incrPendingResources(containers, res);
+ QueueMetrics userMetrics = getUserMetrics(user);
+ if (userMetrics != null) {
+ userMetrics.incrPendingResources(user, containers, res);
+ }
+ if (parent != null) {
+ parent.incrPendingResources(user, containers, res);
+ }
+ }
+
+ private void _incrPendingResources(int containers, Resource res) {
+ pendingContainers.incr(containers);
+ pendingGB.incr(res.getMemory()/GB);
+ }
+
+ public void decrPendingResources(String user, int containers, Resource res) {
+ _decrPendingResources(containers, res);
+ QueueMetrics userMetrics = getUserMetrics(user);
+ if (userMetrics != null) {
+ userMetrics.decrPendingResources(user, containers, res);
+ }
+ if (parent != null) {
+ parent.decrPendingResources(user, containers, res);
+ }
+ }
+
+ private void _decrPendingResources(int containers, Resource res) {
+ pendingContainers.decr(containers);
+ pendingGB.decr(res.getMemory()/GB);
+ }
+
+ public void allocateResources(String user, int containers, Resource res) {
+ allocatedContainers.incr(containers);
+ allocatedGB.incr(res.getMemory()/GB * containers);
+ _decrPendingResources(containers, multiply(res, containers));
+ QueueMetrics userMetrics = getUserMetrics(user);
+ if (userMetrics != null) {
+ userMetrics.allocateResources(user, containers, res);
+ }
+ if (parent != null) {
+ parent.allocateResources(user, containers, res);
+ }
+ }
+
+ public void releaseResources(String user, int containers, Resource res) {
+ allocatedContainers.decr(containers);
+ allocatedGB.decr(res.getMemory()/GB * containers);
+ QueueMetrics userMetrics = getUserMetrics(user);
+ if (userMetrics != null) {
+ userMetrics.releaseResources(user, containers, res);
+ }
+ if (parent != null) {
+ parent.releaseResources(user, containers, res);
+ }
+ }
+
+ public void reserveResource(String user, Resource res) {
+ reservedContainers.incr();
+ reservedGB.incr(res.getMemory()/GB);
+ QueueMetrics userMetrics = getUserMetrics(user);
+ if (userMetrics != null) {
+ userMetrics.reserveResource(user, res);
+ }
+ if (parent != null) {
+ parent.reserveResource(user, res);
+ }
+ }
+
+ public void unreserveResource(String user, Resource res) {
+ reservedContainers.decr();
+ reservedGB.decr(res.getMemory()/GB);
+ QueueMetrics userMetrics = getUserMetrics(user);
+ if (userMetrics != null) {
+ userMetrics.unreserveResource(user, res);
+ }
+ if (parent != null) {
+ parent.unreserveResource(user, res);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
new file mode 100644
index 0000000..babad10
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+
+/**
+ * This interface is implemented by the schedulers. It extends
+ * {@link YarnScheduler} and {@link Recoverable}.
+ */
+@LimitedPrivate("yarn")
+@Evolving
+public interface ResourceScheduler extends YarnScheduler, Recoverable {
+ /**
+ * Re-initialize the <code>ResourceScheduler</code>.
+ * @param conf configuration
+ * @param secretManager token-secret manager
+ * @throws IOException
+ */
+ void reinitialize(Configuration conf,
+ ContainerTokenSecretManager secretManager, RMContext rmContext) throws IOException;
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
new file mode 100644
index 0000000..4f32248
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
@@ -0,0 +1,396 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerFinishedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReservedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
+public class SchedulerApp {
+
+ private static final Log LOG = LogFactory.getLog(SchedulerApp.class);
+
+ private final RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ private final AppSchedulingInfo appSchedulingInfo;
+ private final Queue queue;
+
+ private final Resource currentConsumption = recordFactory
+ .newRecordInstance(Resource.class);
+ private Resource resourceLimit = recordFactory
+ .newRecordInstance(Resource.class);
+
+ private Map<ContainerId, RMContainer> liveContainers
+ = new HashMap<ContainerId, RMContainer>();
+ private List<RMContainer> newlyAllocatedContainers =
+ new ArrayList<RMContainer>();
+
+ final Map<Priority, Map<NodeId, RMContainer>> reservedContainers =
+ new HashMap<Priority, Map<NodeId, RMContainer>>();
+
+ Map<Priority, Integer> schedulingOpportunities = new HashMap<Priority, Integer>();
+
+ Resource currentReservation = recordFactory
+ .newRecordInstance(Resource.class);
+
+ private final RMContext rmContext;
+ public SchedulerApp(ApplicationAttemptId applicationAttemptId,
+ String user, Queue queue,
+ RMContext rmContext, ApplicationStore store) {
+ this.rmContext = rmContext;
+ this.appSchedulingInfo =
+ new AppSchedulingInfo(applicationAttemptId, user, queue, store);
+ this.queue = queue;
+ }
+
+ public ApplicationId getApplicationId() {
+ return this.appSchedulingInfo.getApplicationId();
+ }
+
+ public ApplicationAttemptId getApplicationAttemptId() {
+ return this.appSchedulingInfo.getApplicationAttemptId();
+ }
+
+ public String getUser() {
+ return this.appSchedulingInfo.getUser();
+ }
+
+ public synchronized void updateResourceRequests(
+ List<ResourceRequest> requests) {
+ this.appSchedulingInfo.updateResourceRequests(requests);
+ }
+
+ public Map<String, ResourceRequest> getResourceRequests(Priority priority) {
+ return this.appSchedulingInfo.getResourceRequests(priority);
+ }
+
+ public int getNewContainerId() {
+ return this.appSchedulingInfo.getNewContainerId();
+ }
+
+ public Collection<Priority> getPriorities() {
+ return this.appSchedulingInfo.getPriorities();
+ }
+
+ public ResourceRequest getResourceRequest(Priority priority, String nodeAddress) {
+ return this.appSchedulingInfo.getResourceRequest(priority, nodeAddress);
+ }
+
+ public synchronized int getTotalRequiredResources(Priority priority) {
+ return getResourceRequest(priority, RMNode.ANY).getNumContainers();
+ }
+
+ public Resource getResource(Priority priority) {
+ return this.appSchedulingInfo.getResource(priority);
+ }
+
+ public boolean isPending() {
+ return this.appSchedulingInfo.isPending();
+ }
+
+ public String getQueueName() {
+ return this.appSchedulingInfo.getQueueName();
+ }
+
+ public synchronized Collection<RMContainer> getLiveContainers() {
+ return new ArrayList<RMContainer>(liveContainers.values());
+ }
+
+ public synchronized void stop(RMAppAttemptState rmAppAttemptFinalState) {
+ // Cleanup all scheduling information
+ this.appSchedulingInfo.stop(rmAppAttemptFinalState);
+ }
+
+ synchronized public void containerLaunchedOnNode(ContainerId containerId) {
+ // Inform the container
+ RMContainer rmContainer =
+ getRMContainer(containerId);
+ rmContainer.handle(
+ new RMContainerEvent(containerId,
+ RMContainerEventType.LAUNCHED));
+ }
+
+  public synchronized void killContainers(
+      SchedulerApp application) {
+    // No-op placeholder; container-kill logic is not implemented yet.
+  }
+
+ synchronized public void containerCompleted(RMContainer rmContainer,
+ RMContainerEventType event) {
+
+ Container container = rmContainer.getContainer();
+ ContainerId containerId = container.getId();
+
+ // Inform the container
+ if (event.equals(RMContainerEventType.FINISHED)) {
+ // Have to send diagnostics for finished containers.
+ rmContainer.handle(new RMContainerFinishedEvent(containerId,
+ container.getContainerStatus()));
+ } else {
+ rmContainer.handle(new RMContainerEvent(containerId, event));
+ }
+ LOG.info("Completed container: " + rmContainer.getContainerId() +
+ " in state: " + rmContainer.getState());
+
+ // Remove from the list of containers
+ liveContainers.remove(rmContainer.getContainerId());
+
+ // Update usage metrics
+ Resource containerResource = rmContainer.getContainer().getResource();
+ queue.getMetrics().releaseResources(getUser(), 1, containerResource);
+ Resources.subtractFrom(currentConsumption, containerResource);
+ }
+
+ synchronized public RMContainer allocate(NodeType type, SchedulerNode node,
+ Priority priority, ResourceRequest request,
+ Container container) {
+
+    // Required sanity check - the AM can call 'allocate' to update resource
+    // requests without locking the scheduler, hence we need to re-check that
+    // containers are still required at this priority.
+ if (getTotalRequiredResources(priority) <= 0) {
+ return null;
+ }
+
+ // Create RMContainer
+ RMContainer rmContainer = new RMContainerImpl(container, this
+ .getApplicationAttemptId(), node.getNodeID(), this.rmContext
+ .getDispatcher().getEventHandler(), this.rmContext
+ .getContainerAllocationExpirer());
+
+ // Update consumption and track allocations
+
+ // Inform the container
+ rmContainer.handle(
+ new RMContainerEvent(container.getId(), RMContainerEventType.START));
+
+ Resources.addTo(currentConsumption, container.getResource());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("allocate: applicationId=" + container.getId().getAppId()
+ + " container=" + container.getId() + " host="
+ + container.getNodeId().getHost() + " type=" + type);
+ }
+
+ // Add it to allContainers list.
+ newlyAllocatedContainers.add(rmContainer);
+ liveContainers.put(container.getId(), rmContainer);
+
+ appSchedulingInfo.allocate(type, node, priority, request, container);
+
+ return rmContainer;
+ }
+
+ synchronized public List<Container> pullNewlyAllocatedContainers() {
+ List<Container> returnContainerList = new ArrayList<Container>(
+ newlyAllocatedContainers.size());
+ for (RMContainer rmContainer : newlyAllocatedContainers) {
+ rmContainer.handle(new RMContainerEvent(rmContainer.getContainerId(),
+ RMContainerEventType.ACQUIRED));
+ returnContainerList.add(rmContainer.getContainer());
+ }
+ newlyAllocatedContainers.clear();
+ return returnContainerList;
+ }
+
+ public Resource getCurrentConsumption() {
+ return this.currentConsumption;
+ }
+
+ synchronized public void showRequests() {
+ if (LOG.isDebugEnabled()) {
+ for (Priority priority : getPriorities()) {
+ Map<String, ResourceRequest> requests = getResourceRequests(priority);
+ if (requests != null) {
+ LOG.debug("showRequests:" + " application=" + getApplicationId() +
+ " headRoom=" + getHeadroom() +
+ " currentConsumption=" + currentConsumption.getMemory());
+ for (ResourceRequest request : requests.values()) {
+ LOG.debug("showRequests:" + " application=" + getApplicationId()
+ + " request=" + request);
+ }
+ }
+ }
+ }
+ }
+
+ public synchronized void setAvailableResourceLimit(Resource globalLimit) {
+ this.resourceLimit = globalLimit;
+ }
+
+ public synchronized RMContainer getRMContainer(ContainerId id) {
+ return liveContainers.get(id);
+ }
+
+  synchronized public void resetSchedulingOpportunities(Priority priority) {
+    // The previous count is irrelevant; simply reset it to zero.
+    this.schedulingOpportunities.put(priority, 0);
+  }
+
+ synchronized public void addSchedulingOpportunity(Priority priority) {
+ Integer schedulingOpportunities = this.schedulingOpportunities
+ .get(priority);
+ if (schedulingOpportunities == null) {
+ schedulingOpportunities = 0;
+ }
+ ++schedulingOpportunities;
+ this.schedulingOpportunities.put(priority, schedulingOpportunities);
+ }
+
+ synchronized public int getSchedulingOpportunities(Priority priority) {
+ Integer schedulingOpportunities = this.schedulingOpportunities
+ .get(priority);
+ if (schedulingOpportunities == null) {
+ schedulingOpportunities = 0;
+ this.schedulingOpportunities.put(priority, schedulingOpportunities);
+ }
+ return schedulingOpportunities;
+ }
+
+ public synchronized int getNumReservedContainers(Priority priority) {
+ Map<NodeId, RMContainer> reservedContainers =
+ this.reservedContainers.get(priority);
+ return (reservedContainers == null) ? 0 : reservedContainers.size();
+ }
+
+ /**
+ * Get total current reservations.
+   * Used only by unit tests.
+ * @return total current reservations
+ */
+ @Stable
+ @Private
+ public synchronized Resource getCurrentReservation() {
+ return currentReservation;
+ }
+
+ public synchronized RMContainer reserve(SchedulerNode node, Priority priority,
+ RMContainer rmContainer, Container container) {
+ // Create RMContainer if necessary
+ if (rmContainer == null) {
+ rmContainer =
+ new RMContainerImpl(container, getApplicationAttemptId(),
+ node.getNodeID(), rmContext.getDispatcher().getEventHandler(),
+ rmContext.getContainerAllocationExpirer());
+
+ Resources.addTo(currentReservation, container.getResource());
+ }
+ rmContainer.handle(new RMContainerReservedEvent(container.getId(),
+ container.getResource(), node.getNodeID(), priority));
+
+ Map<NodeId, RMContainer> reservedContainers =
+ this.reservedContainers.get(priority);
+ if (reservedContainers == null) {
+ reservedContainers = new HashMap<NodeId, RMContainer>();
+ this.reservedContainers.put(priority, reservedContainers);
+ }
+ reservedContainers.put(node.getNodeID(), rmContainer);
+
+ LOG.info("Application " + getApplicationId()
+ + " reserved container " + rmContainer
+ + " on node " + node + ", currently has " + reservedContainers.size()
+ + " at priority " + priority
+ + "; currentReservation " + currentReservation.getMemory());
+
+ return rmContainer;
+ }
+
+ public synchronized void unreserve(SchedulerNode node, Priority priority) {
+ Map<NodeId, RMContainer> reservedContainers =
+ this.reservedContainers.get(priority);
+ RMContainer reservedContainer = reservedContainers.remove(node.getNodeID());
+ if (reservedContainers.isEmpty()) {
+ this.reservedContainers.remove(priority);
+ }
+
+ Resource resource = reservedContainer.getContainer().getResource();
+ Resources.subtractFrom(currentReservation, resource);
+
+ LOG.info("Application " + getApplicationId() + " unreserved " + " on node "
+ + node + ", currently has " + reservedContainers.size() + " at priority "
+ + priority + "; currentReservation " + currentReservation);
+ }
+
+ /**
+ * Has the application reserved the given <code>node</code> at the
+ * given <code>priority</code>?
+ * @param node node to be checked
+ * @param priority priority of reserved container
+   * @return true if the application has reserved the given node at the given priority, false otherwise
+ */
+ public synchronized boolean isReserved(SchedulerNode node, Priority priority) {
+ Map<NodeId, RMContainer> reservedContainers =
+ this.reservedContainers.get(priority);
+ if (reservedContainers != null) {
+ return reservedContainers.containsKey(node.getNodeID());
+ }
+ return false;
+ }
+
+ public synchronized float getLocalityWaitFactor(
+ Priority priority, int clusterNodes) {
+ // Estimate: Required unique resources (i.e. hosts + racks)
+ int requiredResources =
+ Math.max(this.getResourceRequests(priority).size() - 1, 0);
+
+ // waitFactor can't be more than '1'
+ // i.e. no point skipping more than clustersize opportunities
+ return Math.min(((float)requiredResources / clusterNodes), 1.0f);
+ }
+
+ public synchronized List<RMContainer> getAllReservedContainers() {
+ List<RMContainer> reservedContainers = new ArrayList<RMContainer>();
+ for (Map.Entry<Priority, Map<NodeId, RMContainer>> e :
+ this.reservedContainers.entrySet()) {
+ reservedContainers.addAll(e.getValue().values());
+ }
+ return reservedContainers;
+ }
+
+ /**
+ * Get available headroom in terms of resources for the application's user.
+ * @return available resource headroom
+ */
+ public synchronized Resource getHeadroom() {
+ Resource limit = Resources.subtract(resourceLimit, currentConsumption);
+ Resources.subtractFrom(limit, currentReservation);
+
+ // Corner case to deal with applications being slightly over-limit
+ if (limit.getMemory() < 0) {
+ limit.setMemory(0);
+ }
+
+ return limit;
+ }
+
+ public Queue getQueue() {
+ return queue;
+ }
+
+}
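
Illustrative sketch (not part of this patch): getHeadroom() above computes
headroom as the application's resource limit minus its current consumption
minus its current reservations, clamped at zero for slightly over-limit
applications. A minimal standalone sketch of that arithmetic, with plain
ints standing in for the YARN Resource records and illustrative values:

    public class HeadroomSketch {
      // headroom = limit - consumption - reservation, floored at zero
      static int headroom(int limitMB, int consumedMB, int reservedMB) {
        return Math.max(limitMB - consumedMB - reservedMB, 0);
      }

      public static void main(String[] args) {
        System.out.println(headroom(8192, 6144, 1024)); // 1024: normal case
        System.out.println(headroom(4096, 4096, 1024)); // 0: over-limit clamped
      }
    }
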
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
new file mode 100644
index 0000000..1cd673c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -0,0 +1,223 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
+public class SchedulerNode {
+
+ private static final Log LOG = LogFactory.getLog(SchedulerNode.class);
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ private Resource availableResource = recordFactory.newRecordInstance(Resource.class);
+ private Resource usedResource = recordFactory.newRecordInstance(Resource.class);
+
+ private volatile int numContainers;
+
+ private RMContainer reservedContainer;
+
+  /* set of containers currently allocated on this node */
+ private final Map<ContainerId, RMContainer> launchedContainers =
+ new HashMap<ContainerId, RMContainer>();
+
+ private final RMNode rmNode;
+
+ public static final String ANY = "*";
+
+ public SchedulerNode(RMNode node) {
+ this.rmNode = node;
+ this.availableResource.setMemory(node.getTotalCapability().getMemory());
+ }
+
+ public RMNode getRMNode() {
+ return this.rmNode;
+ }
+
+ public NodeId getNodeID() {
+ return this.rmNode.getNodeID();
+ }
+
+ public String getHttpAddress() {
+ return this.rmNode.getHttpAddress();
+ }
+
+ public String getHostName() {
+ return this.rmNode.getHostName();
+ }
+
+ public String getRackName() {
+ return this.rmNode.getRackName();
+ }
+
+ /**
+ * The Scheduler has allocated containers on this node to the
+ * given application.
+ *
+   * @param applicationId application to which the container is allocated
+   * @param rmContainer the allocated container
+ */
+ public synchronized void allocateContainer(ApplicationId applicationId,
+ RMContainer rmContainer) {
+ Container container = rmContainer.getContainer();
+ deductAvailableResource(container.getResource());
+ ++numContainers;
+
+ launchedContainers.put(container.getId(), rmContainer);
+
+ LOG.info("Assigned container " + container.getId() +
+ " of capacity " + container.getResource() + " on host " + rmNode.getNodeAddress() +
+ ", which currently has " + numContainers + " containers, " +
+ getUsedResource() + " used and " +
+ getAvailableResource() + " available");
+ }
+
+ public synchronized Resource getAvailableResource() {
+ return this.availableResource;
+ }
+
+ public synchronized Resource getUsedResource() {
+ return this.usedResource;
+ }
+
+  private synchronized boolean isValidContainer(Container c) {
+    return launchedContainers.containsKey(c.getId());
+  }
+
+ private synchronized void updateResource(Container container) {
+ addAvailableResource(container.getResource());
+ --numContainers;
+ }
+
+ /**
+ * Release an allocated container on this node.
+ * @param container container to be released
+ */
+ public synchronized void releaseContainer(Container container) {
+ if (!isValidContainer(container)) {
+ LOG.error("Invalid container released " + container);
+ return;
+ }
+
+    /* remove the container from the nodemanager */
+ launchedContainers.remove(container.getId());
+ updateResource(container);
+
+ LOG.info("Released container " + container.getId() +
+ " of capacity " + container.getResource() + " on host " + rmNode.getNodeAddress() +
+ ", which currently has " + numContainers + " containers, " +
+ getUsedResource() + " used and " + getAvailableResource()
+ + " available" + ", release resources=" + true);
+ }
+
+
+ private synchronized void addAvailableResource(Resource resource) {
+ if (resource == null) {
+ LOG.error("Invalid resource addition of null resource for "
+ + rmNode.getNodeAddress());
+ return;
+ }
+ Resources.addTo(availableResource, resource);
+ Resources.subtractFrom(usedResource, resource);
+ }
+
+ private synchronized void deductAvailableResource(Resource resource) {
+ if (resource == null) {
+ LOG.error("Invalid deduction of null resource for "
+ + rmNode.getNodeAddress());
+ return;
+ }
+ Resources.subtractFrom(availableResource, resource);
+ Resources.addTo(usedResource, resource);
+ }
+
+ @Override
+ public String toString() {
+ return "host: " + rmNode.getNodeAddress() + " #containers=" + getNumContainers() +
+ " available=" + getAvailableResource().getMemory() +
+ " used=" + getUsedResource().getMemory();
+ }
+
+ public int getNumContainers() {
+ return numContainers;
+ }
+
+ public synchronized List<RMContainer> getRunningContainers() {
+ return new ArrayList<RMContainer>(launchedContainers.values());
+ }
+
+ public synchronized void reserveResource(
+ SchedulerApp application, Priority priority,
+ RMContainer reservedContainer) {
+ // Check if it's already reserved
+ if (this.reservedContainer != null) {
+ // Sanity check
+ if (!reservedContainer.getContainer().getNodeId().equals(getNodeID())) {
+ throw new IllegalStateException("Trying to reserve" +
+ " container " + reservedContainer +
+ " on node " + reservedContainer.getReservedNode() +
+ " when currently" + " reserved resource " + this.reservedContainer +
+ " on node " + this.reservedContainer.getReservedNode());
+ }
+
+ // Cannot reserve more than one application on a given node!
+ if (!this.reservedContainer.getContainer().getId().getAppAttemptId().equals(
+ reservedContainer.getContainer().getId().getAppAttemptId())) {
+ throw new IllegalStateException("Trying to reserve" +
+ " container " + reservedContainer +
+ " for application " + application.getApplicationId() +
+ " when currently" +
+ " reserved container " + this.reservedContainer +
+ " on node " + this);
+ }
+
+ LOG.info("Updated reserved container " +
+ reservedContainer.getContainer().getId() + " on node " +
+ this + " for application " + application);
+ } else {
+ LOG.info("Reserved container " + reservedContainer.getContainer().getId() +
+ " on node " + this + " for application " + application);
+ }
+ this.reservedContainer = reservedContainer;
+ }
+
+ public synchronized void unreserveResource(SchedulerApp application) {
+ // Cannot unreserve for wrong application...
+ ApplicationAttemptId reservedApplication =
+ reservedContainer.getContainer().getId().getAppAttemptId();
+ if (!reservedApplication.equals(
+ application.getApplicationAttemptId())) {
+ throw new IllegalStateException("Trying to unreserve " +
+ " for application " + application.getApplicationId() +
+ " when currently reserved " +
+ " for application " + reservedApplication.getApplicationId() +
+ " on node " + this);
+ }
+
+ reservedContainer = null;
+ }
+
+ public synchronized RMContainer getReservedContainer() {
+ return reservedContainer;
+ }
+
+}
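
Illustrative sketch (not part of this patch): SchedulerNode maintains the
invariant that available + used always equals the node's total capability,
because allocateContainer() deducts from the available pool and adds to the
used pool while releaseContainer() reverses both. A standalone sketch of
that bookkeeping, with plain ints in place of Resource records and
illustrative sizes:

    public class NodeBookkeepingSketch {
      private int availableMB;
      private int usedMB;

      NodeBookkeepingSketch(int totalMB) { this.availableMB = totalMB; }

      // Mirrors deductAvailableResource(): capacity moves available -> used.
      void allocate(int mb) { availableMB -= mb; usedMB += mb; }
      // Mirrors addAvailableResource(): capacity moves used -> available.
      void release(int mb) { availableMB += mb; usedMB -= mb; }

      public static void main(String[] args) {
        NodeBookkeepingSketch node = new NodeBookkeepingSketch(4096);
        node.allocate(1024);
        node.allocate(2048);
        node.release(1024);
        // Prints "available=3072 used=1024"; 3072 + 1024 == 4096 still holds.
        System.out.println("available=" + node.availableMB + " used=" + node.usedMB);
      }
    }
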
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java
new file mode 100644
index 0000000..5a5a420
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java
@@ -0,0 +1,28 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * Node usage report.
+ */
+@Private
+@Stable
+public class SchedulerNodeReport {
+ private final Resource usedResources;
+ private final int numContainers;
+
+ public SchedulerNodeReport(Resource used, int numContainers) {
+ this.usedResources = used;
+ this.numContainers = numContainers;
+ }
+
+ public Resource getUsedResources() {
+ return usedResources;
+ }
+
+ public int getNumContainers() {
+ return numContainers;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
new file mode 100644
index 0000000..9f6a2f7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
@@ -0,0 +1,126 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+
+/**
+ * This interface is used by components to talk to the
+ * scheduler to allocate and clean up resources.
+ */
+public interface YarnScheduler extends EventHandler<SchedulerEvent> {
+
+ /**
+   * Get queue information.
+ * @param queueName queue name
+ * @param includeChildQueues include child queues?
+   * @param recursive recursively get information about child queues?
+ * @return queue information
+ * @throws IOException
+ */
+ @Public
+ @Stable
+ public QueueInfo getQueueInfo(String queueName, boolean includeChildQueues,
+ boolean recursive) throws IOException;
+
+ /**
+   * Get ACLs for queues for the current user.
+   * @return ACLs for queues for the current user
+ */
+ @Public
+ @Stable
+ public List<QueueUserACLInfo> getQueueUserAclInfo();
+
+ /**
+ * Get minimum allocatable {@link Resource}.
+ * @return minimum allocatable resource
+ */
+ @Public
+ @Stable
+ public Resource getMinimumResourceCapability();
+
+ /**
+ * Get maximum allocatable {@link Resource}.
+ * @return maximum allocatable resource
+ */
+ @Public
+ @Stable
+ public Resource getMaximumResourceCapability();
+
+ /**
+   * The main API between the ApplicationMaster and the Scheduler.
+   * The ApplicationMaster uses it to update its future resource
+   * requirements and to release containers it no longer needs.
+   *
+   * @param appAttemptId the application attempt making the request
+   * @param ask the updated resource requests
+   * @param release the containers to be released
+ * @return the {@link Allocation} for the application
+ */
+ @Public
+ @Stable
+  Allocation allocate(ApplicationAttemptId appAttemptId,
+      List<ResourceRequest> ask,
+      List<ContainerId> release);
+
+ /**
+ * Get node resource usage report.
+   * @param nodeId the node for which the report is requested
+ * @return the {@link SchedulerNodeReport} for the node
+ */
+ @Private
+ @Stable
+ public SchedulerNodeReport getNodeReport(NodeId nodeId);
+
+ /**
+ * Get used resources on the node
+ * @param nodeId node
+ * @return used resources on the node
+ */
+ @Private
+ @Stable
+ Resource getUsedResource(NodeId nodeId);
+
+ /**
+ * Get available resources on the node
+ * @param nodeId node
+ * @return available resources on the node
+ */
+ @Private
+ @Stable
+ Resource getAvailableResource(NodeId nodeId);
+
+}
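
Illustrative sketch (not part of this patch): one heartbeat round-trip
through YarnScheduler.allocate(), as a caller such as the
ApplicationMasterService might drive it. The helper below is hypothetical,
and Allocation's getContainers() accessor is assumed from the way Allocation
is constructed elsewhere in this patch:

    import java.util.List;

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;

    public class AllocateSketch {
      // One AM heartbeat: update outstanding asks, release finished
      // containers, and pull back whatever was allocated since the last call.
      static List<Container> heartbeat(YarnScheduler scheduler,
          ApplicationAttemptId attemptId, List<ResourceRequest> ask,
          List<ContainerId> release) {
        Allocation allocation = scheduler.allocate(attemptId, ask, release);
        return allocation.getContainers(); // assumed accessor
      }
    }
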
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
new file mode 100644
index 0000000..77172e8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -0,0 +1,725 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.Lock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRejectedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+
+@LimitedPrivate("yarn")
+@Evolving
+public class CapacityScheduler
+    implements ResourceScheduler, CapacitySchedulerContext {
+
+ private static final Log LOG = LogFactory.getLog(CapacityScheduler.class);
+
+ private Queue root;
+
+ private final static List<Container> EMPTY_CONTAINER_LIST =
+ new ArrayList<Container>();
+
+ static final Comparator<Queue> queueComparator = new Comparator<Queue>() {
+ @Override
+ public int compare(Queue q1, Queue q2) {
+ if (q1.getUtilization() < q2.getUtilization()) {
+ return -1;
+ } else if (q1.getUtilization() > q2.getUtilization()) {
+ return 1;
+ }
+
+ return q1.getQueuePath().compareTo(q2.getQueuePath());
+ }
+ };
+
+ static final Comparator<SchedulerApp> applicationComparator =
+ new Comparator<SchedulerApp>() {
+ @Override
+ public int compare(SchedulerApp a1, SchedulerApp a2) {
+ return a1.getApplicationId().getId() - a2.getApplicationId().getId();
+ }
+ };
+
+ private CapacitySchedulerConfiguration conf;
+ private ContainerTokenSecretManager containerTokenSecretManager;
+ private RMContext rmContext;
+
+ private Map<String, Queue> queues = new ConcurrentHashMap<String, Queue>();
+
+ private Map<NodeId, SchedulerNode> nodes =
+ new ConcurrentHashMap<NodeId, SchedulerNode>();
+
+ private Resource clusterResource =
+ RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Resource.class);
+ private int numNodeManagers = 0;
+
+ private Resource minimumAllocation;
+ private Resource maximumAllocation;
+
+ private Map<ApplicationAttemptId, SchedulerApp> applications =
+ new ConcurrentHashMap<ApplicationAttemptId, SchedulerApp>();
+
+ private boolean initialized = false;
+
+ public Queue getRootQueue() {
+ return root;
+ }
+
+ @Override
+ public CapacitySchedulerConfiguration getConfiguration() {
+ return conf;
+ }
+
+ @Override
+ public ContainerTokenSecretManager getContainerTokenSecretManager() {
+ return containerTokenSecretManager;
+ }
+
+ @Override
+ public Resource getMinimumResourceCapability() {
+ return minimumAllocation;
+ }
+
+ @Override
+ public Resource getMaximumResourceCapability() {
+ return maximumAllocation;
+ }
+
+ public synchronized Resource getUsedResource(NodeId nodeId) {
+ return nodes.get(nodeId).getUsedResource();
+ }
+
+ public synchronized Resource getAvailableResource(NodeId nodeId) {
+ return nodes.get(nodeId).getAvailableResource();
+ }
+
+ public synchronized int getNumClusterNodes() {
+ return numNodeManagers;
+ }
+
+ @Override
+ public RMContext getRMContext() {
+ return this.rmContext;
+ }
+
+ @Override
+ public synchronized void reinitialize(Configuration conf,
+ ContainerTokenSecretManager containerTokenSecretManager, RMContext rmContext)
+ throws IOException {
+ if (!initialized) {
+ this.conf = new CapacitySchedulerConfiguration(conf);
+ this.minimumAllocation = this.conf.getMinimumAllocation();
+ this.maximumAllocation = this.conf.getMaximumAllocation();
+ this.containerTokenSecretManager = containerTokenSecretManager;
+ this.rmContext = rmContext;
+ initializeQueues(this.conf);
+ initialized = true;
+ } else {
+
+ CapacitySchedulerConfiguration oldConf = this.conf;
+ this.conf = new CapacitySchedulerConfiguration(conf);
+ try {
+ LOG.info("Re-initializing queues...");
+ reinitializeQueues(this.conf);
+ } catch (Throwable t) {
+ this.conf = oldConf;
+ throw new IOException("Failed to re-init queues", t);
+ }
+ }
+ }
+
+ @Private
+ public static final String ROOT = "root";
+
+ @Private
+ public static final String ROOT_QUEUE =
+ CapacitySchedulerConfiguration.PREFIX + ROOT;
+
+ static class QueueHook {
+ public Queue hook(Queue queue) {
+ return queue;
+ }
+ }
+ private static final QueueHook noop = new QueueHook();
+
+ @Lock(CapacityScheduler.class)
+ private void initializeQueues(CapacitySchedulerConfiguration conf) {
+ root =
+ parseQueue(this, conf, null, ROOT, queues, queues,
+ queueComparator, applicationComparator, noop);
+ LOG.info("Initialized root queue " + root);
+ }
+
+ @Lock(CapacityScheduler.class)
+ private void reinitializeQueues(CapacitySchedulerConfiguration conf)
+ throws IOException {
+ // Parse new queues
+ Map<String, Queue> newQueues = new HashMap<String, Queue>();
+ Queue newRoot =
+ parseQueue(this, conf, null, ROOT, newQueues, queues,
+ queueComparator, applicationComparator, noop);
+
+ // Ensure all existing queues are still present
+ validateExistingQueues(queues, newQueues);
+
+ // Add new queues
+ addNewQueues(queues, newQueues);
+
+ // Re-configure queues
+ root.reinitialize(newRoot, clusterResource);
+ }
+
+ /**
+   * Ensure all existing queues are still present. Queues cannot be deleted.
+ * @param queues existing queues
+ * @param newQueues new queues
+ */
+ @Lock(CapacityScheduler.class)
+ private void validateExistingQueues(
+ Map<String, Queue> queues, Map<String, Queue> newQueues)
+ throws IOException {
+ for (String queue : queues.keySet()) {
+ if (!newQueues.containsKey(queue)) {
+ throw new IOException(queue + " cannot be found during refresh!");
+ }
+ }
+ }
+
+ /**
+   * Add only the new queues to our list of queues;
+   * be careful not to overwrite existing queues.
+   * @param queues existing queues
+   * @param newQueues newly parsed queues
+ */
+ @Lock(CapacityScheduler.class)
+ private void addNewQueues(
+ Map<String, Queue> queues, Map<String, Queue> newQueues)
+ {
+ for (Map.Entry<String, Queue> e : newQueues.entrySet()) {
+ String queueName = e.getKey();
+ Queue queue = e.getValue();
+ if (!queues.containsKey(queueName)) {
+ queues.put(queueName, queue);
+ }
+ }
+ }
+
+ @Lock(CapacityScheduler.class)
+ static Queue parseQueue(
+ CapacitySchedulerContext csContext,
+ CapacitySchedulerConfiguration conf,
+ Queue parent, String queueName, Map<String, Queue> queues,
+ Map<String, Queue> oldQueues,
+ Comparator<Queue> queueComparator,
+ Comparator<SchedulerApp> applicationComparator,
+ QueueHook hook) {
+ Queue queue;
+ String[] childQueueNames =
+ conf.getQueues((parent == null) ?
+ queueName : (parent.getQueuePath()+"."+queueName));
+ if (childQueueNames == null || childQueueNames.length == 0) {
+ if (null == parent) {
+ throw new IllegalStateException(
+ "Queue configuration missing child queue names for " + queueName);
+ }
+ queue = new LeafQueue(csContext, queueName, parent, applicationComparator,
+ oldQueues.get(queueName));
+
+ // Used only for unit tests
+ queue = hook.hook(queue);
+ } else {
+ ParentQueue parentQueue =
+ new ParentQueue(csContext, queueName, queueComparator, parent,
+ oldQueues.get(queueName));
+
+ // Used only for unit tests
+ queue = hook.hook(parentQueue);
+
+ List<Queue> childQueues = new ArrayList<Queue>();
+ for (String childQueueName : childQueueNames) {
+ Queue childQueue =
+ parseQueue(csContext, conf, queue, childQueueName,
+ queues, oldQueues, queueComparator, applicationComparator, hook);
+ childQueues.add(childQueue);
+ }
+ parentQueue.setChildQueues(childQueues);
+ }
+
+ queues.put(queueName, queue);
+
+ LOG.info("Initialized queue: " + queue);
+ return queue;
+ }
+
+ synchronized Queue getQueue(String queueName) {
+ return queues.get(queueName);
+ }
+
+  private synchronized void addApplication(
+      ApplicationAttemptId applicationAttemptId,
+      String queueName, String user) {
+
+ // Sanity checks
+ Queue queue = getQueue(queueName);
+ if (queue == null) {
+ String message = "Application " + applicationAttemptId +
+ " submitted by user " + user + " to unknown queue: " + queueName;
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppAttemptRejectedEvent(applicationAttemptId, message));
+ return;
+ }
+ if (!(queue instanceof LeafQueue)) {
+ String message = "Application " + applicationAttemptId +
+ " submitted by user " + user + " to non-leaf queue: " + queueName;
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppAttemptRejectedEvent(applicationAttemptId, message));
+ return;
+ }
+
+ // TODO: Fix store
+    SchedulerApp schedulerApp =
+        new SchedulerApp(applicationAttemptId, user, queue, rmContext, null);
+
+ // Submit to the queue
+ try {
+      queue.submitApplication(schedulerApp, user, queueName);
+ } catch (AccessControlException ace) {
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppAttemptRejectedEvent(applicationAttemptId,
+ ace.toString()));
+ return;
+ }
+
+    applications.put(applicationAttemptId, schedulerApp);
+
+ LOG.info("Application Submission: " + applicationAttemptId +
+ ", user: " + user +
+ " queue: " + queue +
+ ", currently active: " + applications.size());
+
+ rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppAttemptEvent(applicationAttemptId,
+ RMAppAttemptEventType.APP_ACCEPTED));
+ }
+
+ private synchronized void doneApplication(
+ ApplicationAttemptId applicationAttemptId,
+ RMAppAttemptState rmAppAttemptFinalState) {
+ LOG.info("Application " + applicationAttemptId + " is done." +
+ " finalState=" + rmAppAttemptFinalState);
+
+ SchedulerApp application = getApplication(applicationAttemptId);
+
+ if (application == null) {
+ // throw new IOException("Unknown application " + applicationId +
+ // " has completed!");
+ LOG.info("Unknown application " + applicationAttemptId + " has completed!");
+ return;
+ }
+
+ // Release all the running containers
+ for (RMContainer rmContainer : application.getLiveContainers()) {
+ completedContainer(rmContainer, RMContainerEventType.KILL);
+ }
+
+ // Release all reserved containers
+ for (RMContainer rmContainer : application.getAllReservedContainers()) {
+ completedContainer(rmContainer, RMContainerEventType.KILL);
+ }
+
+ // Clean up pending requests, metrics etc.
+ application.stop(rmAppAttemptFinalState);
+
+ // Inform the queue
+ String queueName = application.getQueue().getQueueName();
+ Queue queue = queues.get(queueName);
+ if (!(queue instanceof LeafQueue)) {
+ LOG.error("Cannot finish application " + "from non-leaf queue: "
+ + queueName);
+ } else {
+ queue.finishApplication(application, queue.getQueueName());
+ }
+
+ // Remove from our data-structure
+ applications.remove(applicationAttemptId);
+ }
+
+ private static final Allocation EMPTY_ALLOCATION =
+ new Allocation(EMPTY_CONTAINER_LIST, Resources.createResource(0));
+
+ @Override
+ @Lock(Lock.NoLock.class)
+ public Allocation allocate(ApplicationAttemptId applicationAttemptId,
+ List<ResourceRequest> ask, List<ContainerId> release) {
+
+ SchedulerApp application = getApplication(applicationAttemptId);
+ if (application == null) {
+ LOG.info("Calling allocate on removed " +
+ "or non existant application " + applicationAttemptId);
+ return EMPTY_ALLOCATION;
+ }
+
+ // Sanity check
+ normalizeRequests(ask);
+
+ // Release containers
+ for (ContainerId releasedContainerId : release) {
+ completedContainer(getRMContainer(releasedContainerId),
+ RMContainerEventType.RELEASED);
+ }
+
+ synchronized (application) {
+
+ if (!ask.isEmpty()) {
+
+ LOG.info("DEBUG --- allocate: pre-update" +
+ " applicationAttemptId=" + applicationAttemptId +
+ " application=" + application);
+ application.showRequests();
+
+ // Update application requests
+ application.updateResourceRequests(ask);
+
+ LOG.info("DEBUG --- allocate: post-update");
+ application.showRequests();
+ }
+
+ LOG.info("DEBUG --- allocate:" +
+ " applicationAttemptId=" + applicationAttemptId +
+ " #ask=" + ask.size());
+
+ return new Allocation(
+ application.pullNewlyAllocatedContainers(),
+ application.getHeadroom());
+ }
+ }
+
+ @Override
+ @Lock(Lock.NoLock.class)
+ public QueueInfo getQueueInfo(String queueName,
+ boolean includeChildQueues, boolean recursive)
+ throws IOException {
+ Queue queue = null;
+
+ synchronized (this) {
+ queue = this.queues.get(queueName);
+ }
+
+ if (queue == null) {
+ throw new IOException("Unknown queue: " + queueName);
+ }
+ return queue.getQueueInfo(includeChildQueues, recursive);
+ }
+
+ @Override
+ @Lock(Lock.NoLock.class)
+ public List<QueueUserACLInfo> getQueueUserAclInfo() {
+ UserGroupInformation user = null;
+ try {
+ user = UserGroupInformation.getCurrentUser();
+ } catch (IOException ioe) {
+ // should never happen
+ return new ArrayList<QueueUserACLInfo>();
+ }
+
+ return root.getQueueUserAclInfo(user);
+ }
+
+ @Lock(Lock.NoLock.class)
+ private void normalizeRequests(List<ResourceRequest> asks) {
+ for (ResourceRequest ask : asks) {
+ normalizeRequest(ask);
+ }
+ }
+
+ @Lock(Lock.NoLock.class)
+ private void normalizeRequest(ResourceRequest ask) {
+ int minMemory = minimumAllocation.getMemory();
+ int memory = Math.max(ask.getCapability().getMemory(), minMemory);
+    ask.getCapability().setMemory(
+        minMemory * ((memory / minMemory) + (memory % minMemory > 0 ? 1 : 0)));
+ }
+
+ private synchronized void nodeUpdate(RMNode nm,
+ Map<ApplicationId, List<Container>> containers ) {
+ LOG.info("nodeUpdate: " + nm + " clusterResources: " + clusterResource);
+
+ SchedulerNode node = getNode(nm.getNodeID());
+
+ // Processing the current containers running/finished on node
+ for (List<Container> appContainers : containers.values()) {
+ for (Container container : appContainers) {
+ if (container.getState() == ContainerState.RUNNING) {
+ containerLaunchedOnNode(container, node);
+ } else { // has to be 'COMPLETE'
+ LOG.info("DEBUG --- Container FINISHED: " + container.getId());
+ completedContainer(getRMContainer(container.getId()),
+ RMContainerEventType.FINISHED);
+ }
+ }
+ }
+
+    // Now node data structures are up to date and ready for scheduling.
+ LOG.info("DEBUG -- Node being looked for scheduling " + nm
+ + " availableResource: " + node.getAvailableResource());
+
+ // Assign new containers...
+ // 1. Check for reserved applications
+ // 2. Schedule if there are no reservations
+
+ RMContainer reservedContainer = node.getReservedContainer();
+ if (reservedContainer != null) {
+ SchedulerApp reservedApplication =
+ getApplication(reservedContainer.getApplicationAttemptId());
+
+ // Try to fulfill the reservation
+ LOG.info("Trying to fulfill reservation for application " +
+ reservedApplication.getApplicationId() + " on node: " + nm);
+
+ LeafQueue queue = ((LeafQueue)reservedApplication.getQueue());
+ queue.assignContainers(clusterResource, node);
+ }
+
+ // Try to schedule more if there are no reservations to fulfill
+ if (node.getReservedContainer() == null) {
+ root.assignContainers(clusterResource, node);
+ } else {
+ LOG.info("Skipping scheduling since node " + nm +
+ " is reserved by application " +
+ node.getReservedContainer().getContainerId().getAppId());
+ }
+
+ }
+
+ private void containerLaunchedOnNode(Container container, SchedulerNode node) {
+ // Get the application for the finished container
+ ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId();
+ SchedulerApp application = getApplication(applicationAttemptId);
+ if (application == null) {
+ LOG.info("Unknown application: " + applicationAttemptId +
+ " launched container " + container.getId() +
+ " on node: " + node);
+ return;
+ }
+
+ application.containerLaunchedOnNode(container.getId());
+ }
+
+ @Override
+ public void handle(SchedulerEvent event) {
+ switch(event.getType()) {
+ case NODE_ADDED:
+ {
+ NodeAddedSchedulerEvent nodeAddedEvent = (NodeAddedSchedulerEvent)event;
+ addNode(nodeAddedEvent.getAddedRMNode());
+ }
+ break;
+ case NODE_REMOVED:
+ {
+ NodeRemovedSchedulerEvent nodeRemovedEvent = (NodeRemovedSchedulerEvent)event;
+ removeNode(nodeRemovedEvent.getRemovedRMNode());
+ }
+ break;
+ case NODE_UPDATE:
+ {
+ NodeUpdateSchedulerEvent nodeUpdatedEvent = (NodeUpdateSchedulerEvent)event;
+ nodeUpdate(nodeUpdatedEvent.getRMNode(),
+ nodeUpdatedEvent.getContainers());
+ }
+ break;
+ case APP_ADDED:
+ {
+ AppAddedSchedulerEvent appAddedEvent = (AppAddedSchedulerEvent)event;
+ addApplication(appAddedEvent.getApplicationAttemptId(), appAddedEvent
+ .getQueue(), appAddedEvent.getUser());
+ }
+ break;
+ case APP_REMOVED:
+ {
+ AppRemovedSchedulerEvent appRemovedEvent = (AppRemovedSchedulerEvent)event;
+ doneApplication(appRemovedEvent.getApplicationAttemptID(),
+ appRemovedEvent.getFinalAttemptState());
+ }
+ break;
+ case CONTAINER_EXPIRED:
+ {
+ ContainerExpiredSchedulerEvent containerExpiredEvent =
+ (ContainerExpiredSchedulerEvent) event;
+ completedContainer(getRMContainer(containerExpiredEvent.getContainerId()),
+ RMContainerEventType.EXPIRE);
+ }
+ break;
+ default:
+ LOG.error("Invalid eventtype " + event.getType() + ". Ignoring!");
+ }
+ }
+
+ private synchronized void addNode(RMNode nodeManager) {
+ this.nodes.put(nodeManager.getNodeID(), new SchedulerNode(nodeManager));
+ Resources.addTo(clusterResource, nodeManager.getTotalCapability());
+ ++numNodeManagers;
+ LOG.info("Added node " + nodeManager.getNodeAddress() +
+ " clusterResource: " + clusterResource);
+ }
+
+ private synchronized void removeNode(RMNode nodeInfo) {
+ SchedulerNode node = this.nodes.get(nodeInfo.getNodeID());
+ Resources.subtractFrom(clusterResource, nodeInfo.getTotalCapability());
+ --numNodeManagers;
+
+ // Remove running containers
+ List<RMContainer> runningContainers = node.getRunningContainers();
+ for (RMContainer container : runningContainers) {
+ completedContainer(container, RMContainerEventType.KILL);
+ }
+
+ // Remove reservations, if any
+ RMContainer reservedContainer = node.getReservedContainer();
+ if (reservedContainer != null) {
+ completedContainer(reservedContainer, RMContainerEventType.KILL);
+ }
+
+ this.nodes.remove(nodeInfo.getNodeID());
+ LOG.info("Removed node " + nodeInfo.getNodeAddress() +
+ " clusterResource: " + clusterResource);
+ }
+
+ @Lock(CapacityScheduler.class)
+ private synchronized void completedContainer(RMContainer rmContainer,
+ RMContainerEventType event) {
+ if (rmContainer == null) {
+ LOG.info("Null container completed...");
+ return;
+ }
+
+ Container container = rmContainer.getContainer();
+
+ // Get the application for the finished container
+ ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId();
+ SchedulerApp application = getApplication(applicationAttemptId);
+ if (application == null) {
+ LOG.info("Container " + container + " of" +
+ " unknown application " + applicationAttemptId +
+ " completed with event " + event);
+ return;
+ }
+
+ // Get the node on which the container was allocated
+ SchedulerNode node = getNode(container.getNodeId());
+
+ // Inform the queue
+ LeafQueue queue = (LeafQueue)application.getQueue();
+ queue.completedContainer(clusterResource, application, node,
+ rmContainer, event);
+
+ LOG.info("Application " + applicationAttemptId +
+ " released container " + container.getId() +
+ " on node: " + node +
+ " with event: " + event);
+ }
+
+ @Lock(Lock.NoLock.class)
+ SchedulerApp getApplication(ApplicationAttemptId applicationAttemptId) {
+ return applications.get(applicationAttemptId);
+ }
+
+ @Lock(Lock.NoLock.class)
+ SchedulerNode getNode(NodeId nodeId) {
+ return nodes.get(nodeId);
+ }
+
+ private RMContainer getRMContainer(ContainerId containerId) {
+ SchedulerApp application =
+ getApplication(containerId.getAppAttemptId());
+ return (application == null) ? null : application.getRMContainer(containerId);
+ }
+
+ @Override
+ @Lock(Lock.NoLock.class)
+ public void recover(RMState state) throws Exception {
+ // TODO: VINDOKVFIXME recovery
+// applications.clear();
+// for (Map.Entry<ApplicationId, ApplicationInfo> entry : state.getStoredApplications().entrySet()) {
+// ApplicationId appId = entry.getKey();
+// ApplicationInfo appInfo = entry.getValue();
+// SchedulerApp app = applications.get(appId);
+// app.allocate(appInfo.getContainers());
+// for (Container c: entry.getValue().getContainers()) {
+// Queue queue = queues.get(appInfo.getApplicationSubmissionContext().getQueue());
+// queue.recoverContainer(clusterResource, applications.get(appId), c);
+// }
+// }
+ }
+
+ @Override
+ public SchedulerNodeReport getNodeReport(NodeId nodeId) {
+ SchedulerNode node = getNode(nodeId);
+ return new SchedulerNodeReport(
+ node.getUsedResource(), node.getNumContainers());
+ }
+
+}
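
Illustrative sketch (not part of this patch): normalizeRequest() above first
raises a request's memory to the configured minimum allocation, then rounds
it up to the next multiple of that minimum. The same arithmetic, standalone,
using the 1024 MB default minimum from CapacitySchedulerConfiguration:

    public class NormalizeSketch {
      static int normalize(int requestedMB, int minMB) {
        int memory = Math.max(requestedMB, minMB);
        // Round up to the next multiple of the minimum allocation.
        return minMB * ((memory / minMB) + (memory % minMB > 0 ? 1 : 0));
      }

      public static void main(String[] args) {
        System.out.println(normalize(200, 1024));  // 1024: raised to minimum
        System.out.println(normalize(1500, 1024)); // 2048: rounded up
        System.out.println(normalize(2048, 1024)); // 2048: already aligned
      }
    }
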
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
new file mode 100644
index 0000000..714a472
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -0,0 +1,216 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+
+public class CapacitySchedulerConfiguration extends Configuration {
+
+ private static final Log LOG =
+ LogFactory.getLog(CapacitySchedulerConfiguration.class);
+
+ private static final String CS_CONFIGURATION_FILE = "capacity-scheduler.xml";
+
+ @Private
+ public static final String PREFIX = "yarn.capacity-scheduler.";
+
+ @Private
+ public static final String DOT = ".";
+
+ @Private
+ public static final String MAXIMUM_SYSTEM_APPLICATIONS =
+ PREFIX + "maximum-applications";
+
+ @Private
+ public static final String QUEUES = "queues";
+
+ @Private
+ public static final String CAPACITY = "capacity";
+
+ @Private
+ public static final String MAXIMUM_CAPACITY = "maximum-capacity";
+
+ @Private
+ public static final String USER_LIMIT = "minimum-user-limit-percent";
+
+ @Private
+ public static final String USER_LIMIT_FACTOR = "user-limit-factor";
+
+ @Private
+ public static final String STATE = "state";
+
+ private static final int MINIMUM_MEMORY = 1024;
+
+ @Private
+ public static final String MINIMUM_ALLOCATION =
+ PREFIX + "minimum-allocation-mb";
+
+ private static final int MAXIMUM_MEMORY = 10240;
+
+ @Private
+ public static final String MAXIMUM_ALLOCATION =
+ PREFIX + "maximum-allocation-mb";
+
+ @Private
+  public static final int DEFAULT_MAXIMUM_SYSTEM_APPLICATIONS = 10000;
+
+ @Private
+ public static final int UNDEFINED = -1;
+
+ @Private
+ public static final int MINIMUM_CAPACITY_VALUE = 1;
+
+ @Private
+ public static final int MAXIMUM_CAPACITY_VALUE = 100;
+
+ @Private
+ public static final int DEFAULT_USER_LIMIT = 100;
+
+ @Private
+ public static final float DEFAULT_USER_LIMIT_FACTOR = 1.0f;
+
+ @Private
+ public static final String DEFAULT_ACL = "*";
+
+ @Private public static final String ENABLE_USER_METRICS =
+ PREFIX +"user-metrics.enable";
+ @Private public static final boolean DEFAULT_ENABLE_USER_METRICS = false;
+
+ public CapacitySchedulerConfiguration() {
+ this(new Configuration());
+ }
+
+ public CapacitySchedulerConfiguration(Configuration configuration) {
+ super(configuration);
+ addResource(CS_CONFIGURATION_FILE);
+ }
+
+ private String getQueuePrefix(String queue) {
+    return PREFIX + queue + DOT;
+ }
+
+ public int getMaximumSystemApplications() {
+ int maxApplications =
+      getInt(MAXIMUM_SYSTEM_APPLICATIONS, DEFAULT_MAXIMUM_SYSTEM_APPLICATIONS);
+ return maxApplications;
+ }
+
+ public int getCapacity(String queue) {
+ int capacity = getInt(getQueuePrefix(queue) + CAPACITY, UNDEFINED);
+ if (capacity < MINIMUM_CAPACITY_VALUE || capacity > MAXIMUM_CAPACITY_VALUE) {
+ throw new IllegalArgumentException("Illegal " +
+ "capacity of " + capacity + " for queue " + queue);
+ }
+ LOG.info("CSConf - setCapacity: queuePrefix=" + getQueuePrefix(queue) +
+ ", capacity=" + capacity);
+ return capacity;
+ }
+
+ public void setCapacity(String queue, int capacity) {
+ setInt(getQueuePrefix(queue) + CAPACITY, capacity);
+ LOG.info("CSConf - setCapacity: queuePrefix=" + getQueuePrefix(queue) +
+ ", capacity=" + capacity);
+ }
+
+ public int getMaximumCapacity(String queue) {
+ int maxCapacity =
+ getInt(getQueuePrefix(queue) + MAXIMUM_CAPACITY, UNDEFINED);
+ return maxCapacity;
+ }
+
+ public int getUserLimit(String queue) {
+ int userLimit =
+ getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT);
+ return userLimit;
+ }
+
+ public float getUserLimitFactor(String queue) {
+ float userLimitFactor =
+ getFloat(getQueuePrefix(queue) + USER_LIMIT_FACTOR,
+ DEFAULT_USER_LIMIT_FACTOR);
+ return userLimitFactor;
+ }
+
+ public void setUserLimitFactor(String queue, float userLimitFactor) {
+ setFloat(getQueuePrefix(queue) + USER_LIMIT_FACTOR, userLimitFactor);
+ }
+
+ public QueueState getState(String queue) {
+ String state = get(getQueuePrefix(queue) + STATE);
+ return (state != null) ?
+ QueueState.valueOf(state.toUpperCase()) : QueueState.RUNNING;
+ }
+
+ private static String getAclKey(QueueACL acl) {
+ return "acl_" + acl.toString().toLowerCase();
+ }
+
+ public Map<QueueACL, AccessControlList> getAcls(String queue) {
+ Map<QueueACL, AccessControlList> acls =
+ new HashMap<QueueACL, AccessControlList>();
+ String queuePrefix = getQueuePrefix(queue);
+ for (QueueACL acl : QueueACL.values()) {
+ acls.put(acl,
+ new AccessControlList(get(queuePrefix + getAclKey(acl),
+ DEFAULT_ACL)));
+ }
+ return acls;
+ }
+
+ public String[] getQueues(String queue) {
+ LOG.info("CSConf - getQueues called for: queuePrefix=" + getQueuePrefix(queue));
+ String[] queues = getStrings(getQueuePrefix(queue) + QUEUES);
+ LOG.info("CSConf - getQueues: queuePrefix=" + getQueuePrefix(queue) +
+ ", queues=" + ((queues == null) ? "" : StringUtils.arrayToString(queues)));
+ return queues;
+ }
+
+ public void setQueues(String queue, String[] subQueues) {
+ set(getQueuePrefix(queue) + QUEUES, StringUtils.arrayToString(subQueues));
+ LOG.info("CSConf - setQueues: qPrefix=" + getQueuePrefix(queue) +
+ ", queues=" + StringUtils.arrayToString(subQueues));
+ }
+
+ public Resource getMinimumAllocation() {
+ int minimumMemory = getInt(MINIMUM_ALLOCATION, MINIMUM_MEMORY);
+ return Resources.createResource(minimumMemory);
+ }
+
+ public Resource getMaximumAllocation() {
+ int maximumMemory = getInt(MAXIMUM_ALLOCATION, MAXIMUM_MEMORY);
+ return Resources.createResource(maximumMemory);
+ }
+
+ public boolean getEnableUserMetrics() {
+ return getBoolean(ENABLE_USER_METRICS, DEFAULT_ENABLE_USER_METRICS);
+ }
+}
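
Illustrative sketch (not part of this patch): queue properties resolve to
keys of the form yarn.capacity-scheduler.<queue-path>.<property>. A minimal
programmatic setup using the setters above, with hypothetical queue names
"a" and "b" (capacities are percentages of the parent queue):

    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

    public class ConfSketch {
      public static void main(String[] args) {
        CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
        // Sets yarn.capacity-scheduler.root.queues = a,b
        conf.setQueues("root", new String[] {"a", "b"});
        // Sets yarn.capacity-scheduler.root.a.capacity = 60, and so on.
        conf.setCapacity("root.a", 60);
        conf.setCapacity("root.b", 40);
      }
    }
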
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java
new file mode 100644
index 0000000..d48557a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java
@@ -0,0 +1,41 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+
+/**
+ * Read-only interface to {@link CapacityScheduler} context.
+ */
+public interface CapacitySchedulerContext {
+ CapacitySchedulerConfiguration getConfiguration();
+
+ Resource getMinimumResourceCapability();
+
+ Resource getMaximumResourceCapability();
+
+ ContainerTokenSecretManager getContainerTokenSecretManager();
+
+ int getNumClusterNodes();
+
+ RMContext getRMContext();
+}
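Because the context is a read-only view of the scheduler, tests can hand queues a throwaway stub. A minimal sketch, assuming Resources.createResource and a no-argument CapacitySchedulerConfiguration constructor; all values are placeholders:

    CapacitySchedulerContext ctx = new CapacitySchedulerContext() {
      private final CapacitySchedulerConfiguration conf =
          new CapacitySchedulerConfiguration();
      public CapacitySchedulerConfiguration getConfiguration() { return conf; }
      public Resource getMinimumResourceCapability() {
        return Resources.createResource(1024);  // 1 GB minimum container
      }
      public Resource getMaximumResourceCapability() {
        return Resources.createResource(10240); // 10 GB maximum container
      }
      public ContainerTokenSecretManager getContainerTokenSecretManager() {
        return null;                            // no security in this sketch
      }
      public int getNumClusterNodes() { return 4; }
      public RMContext getRMContext() { return null; }
    };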
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
new file mode 100644
index 0000000..6f98c26
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -0,0 +1,1127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+
+@Private
+@Unstable
+public class LeafQueue implements Queue {
+ private static final Log LOG = LogFactory.getLog(LeafQueue.class);
+
+ private final String queueName;
+ private Queue parent;
+ private float capacity;
+ private float absoluteCapacity;
+ private float maximumCapacity;
+ private float absoluteMaxCapacity;
+ private int userLimit;
+ private float userLimitFactor;
+
+ private int maxApplications;
+ private int maxApplicationsPerUser;
+ private Resource usedResources = Resources.createResource(0);
+ private float utilization = 0.0f;
+ private float usedCapacity = 0.0f;
+ private volatile int numContainers;
+
+ Set<SchedulerApp> applications;
+ Map<ApplicationAttemptId, SchedulerApp> applicationsMap =
+ new HashMap<ApplicationAttemptId, SchedulerApp>();
+
+ public final Resource minimumAllocation;
+
+ private ContainerTokenSecretManager containerTokenSecretManager;
+
+ private Map<String, User> users = new HashMap<String, User>();
+
+ private final QueueMetrics metrics;
+
+ private QueueInfo queueInfo;
+
+ private QueueState state;
+
+ private Map<QueueACL, AccessControlList> acls =
+ new HashMap<QueueACL, AccessControlList>();
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ private CapacitySchedulerContext scheduler;
+
+ public LeafQueue(CapacitySchedulerContext cs,
+ String queueName, Queue parent,
+ Comparator<SchedulerApp> applicationComparator, Queue old) {
+ this.scheduler = cs;
+ this.queueName = queueName;
+ this.parent = parent;
+ // must be after parent and queueName are initialized
+ this.metrics = old != null ? old.getMetrics() :
+ QueueMetrics.forQueue(getQueuePath(), parent,
+ cs.getConfiguration().getEnableUserMetrics());
+
+ this.minimumAllocation = cs.getMinimumResourceCapability();
+ this.containerTokenSecretManager = cs.getContainerTokenSecretManager();
+
+ float capacity =
+ (float)cs.getConfiguration().getCapacity(getQueuePath()) / 100;
+ float absoluteCapacity = parent.getAbsoluteCapacity() * capacity;
+
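+ // An UNDEFINED maximum-capacity disables the cap (Float.MAX_VALUE below);
+ // otherwise the configured percentage is scaled by the parent's share.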
+ float maximumCapacity = cs.getConfiguration().getMaximumCapacity(getQueuePath());
+ float absoluteMaxCapacity =
+ (maximumCapacity == CapacitySchedulerConfiguration.UNDEFINED) ?
+ Float.MAX_VALUE : (parent.getAbsoluteCapacity() * maximumCapacity) / 100;
+
+ int userLimit = cs.getConfiguration().getUserLimit(getQueuePath());
+ float userLimitFactor =
+ cs.getConfiguration().getUserLimitFactor(getQueuePath());
+
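+ // Application limits: the system-wide maximum scaled down by this queue's
+ // absolute capacity, then further by user-limit and user-limit-factor.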
+ int maxSystemJobs = cs.getConfiguration().getMaximumSystemApplications();
+ int maxApplications = (int)(maxSystemJobs * absoluteCapacity);
+ int maxApplicationsPerUser =
+ (int)(maxApplications * (userLimit / 100.0f) * userLimitFactor);
+
+ this.queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
+ this.queueInfo.setQueueName(queueName);
+ this.queueInfo.setChildQueues(new ArrayList<QueueInfo>());
+
+ QueueState state = cs.getConfiguration().getState(getQueuePath());
+
+ Map<QueueACL, AccessControlList> acls =
+ cs.getConfiguration().getAcls(getQueuePath());
+
+ setupQueueConfigs(capacity, absoluteCapacity,
+ maximumCapacity, absoluteMaxCapacity,
+ userLimit, userLimitFactor,
+ maxApplications, maxApplicationsPerUser,
+ state, acls);
+
+ LOG.info("DEBUG --- LeafQueue:" +
+ " name=" + queueName +
+ ", fullname=" + getQueuePath());
+
+ this.applications = new TreeSet<SchedulerApp>(applicationComparator);
+ }
+
+ private synchronized void setupQueueConfigs(
+ float capacity, float absoluteCapacity,
+ float maxCapacity, float absoluteMaxCapacity,
+ int userLimit, float userLimitFactor,
+ int maxApplications, int maxApplicationsPerUser,
+ QueueState state, Map<QueueACL, AccessControlList> acls)
+ {
+ this.capacity = capacity;
+ this.absoluteCapacity = parent.getAbsoluteCapacity() * capacity;
+
+ this.maximumCapacity = maxCapacity;
+ this.absoluteMaxCapacity = absoluteMaxCapacity;
+
+ this.userLimit = userLimit;
+ this.userLimitFactor = userLimitFactor;
+
+ this.maxApplications = maxApplications;
+ this.maxApplicationsPerUser = maxApplicationsPerUser;
+
+ this.state = state;
+
+ this.acls = acls;
+
+ this.queueInfo.setCapacity(capacity);
+ this.queueInfo.setMaximumCapacity(maximumCapacity);
+ this.queueInfo.setQueueState(state);
+
+ StringBuilder aclsString = new StringBuilder();
+ for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
+ aclsString.append(e.getKey() + ":" + e.getValue().getAclString());
+ }
+
+ LOG.info("Initializing " + queueName +
+ ", capacity=" + capacity +
+ ", asboluteCapacity=" + absoluteCapacity +
+ ", maxCapacity=" + maxCapacity +
+ ", asboluteMaxCapacity=" + absoluteMaxCapacity +
+ ", userLimit=" + userLimit + ", userLimitFactor=" + userLimitFactor +
+ ", maxApplications=" + maxApplications +
+ ", maxApplicationsPerUser=" + maxApplicationsPerUser +
+ ", state=" + state +
+ ", acls=" + aclsString);
+ }
+
+ @Override
+ public synchronized float getCapacity() {
+ return capacity;
+ }
+
+ @Override
+ public synchronized float getAbsoluteCapacity() {
+ return absoluteCapacity;
+ }
+
+ @Override
+ public synchronized float getMaximumCapacity() {
+ return maximumCapacity;
+ }
+
+ @Override
+ public synchronized float getAbsoluteMaximumCapacity() {
+ return absoluteMaxCapacity;
+ }
+
+ @Override
+ public Queue getParent() {
+ return parent;
+ }
+
+ @Override
+ public String getQueueName() {
+ return queueName;
+ }
+
+ @Override
+ public String getQueuePath() {
+ return parent.getQueuePath() + "." + getQueueName();
+ }
+
+ @Override
+ public synchronized float getUsedCapacity() {
+ return usedCapacity;
+ }
+
+ @Override
+ public synchronized Resource getUsedResources() {
+ return usedResources;
+ }
+
+ @Override
+ public synchronized float getUtilization() {
+ return utilization;
+ }
+
+ @Override
+ public List<Queue> getChildQueues() {
+ return null;
+ }
+
+ synchronized void setUtilization(float utilization) {
+ this.utilization = utilization;
+ }
+
+ synchronized void setUsedCapacity(float usedCapacity) {
+ this.usedCapacity = usedCapacity;
+ }
+
+ /**
+ * Set maximum capacity - used only for testing.
+ * @param maximumCapacity new max capacity
+ */
+ synchronized void setMaxCapacity(float maximumCapacity) {
+ this.maximumCapacity = maximumCapacity;
+ this.absoluteMaxCapacity =
+ (maximumCapacity == CapacitySchedulerConfiguration.UNDEFINED) ?
+ Float.MAX_VALUE :
+ (parent.getAbsoluteCapacity() * maximumCapacity);
+ }
+
+ /**
+ * Set user limit - used only for testing.
+ * @param userLimit new user limit
+ */
+ synchronized void setUserLimit(int userLimit) {
+ this.userLimit = userLimit;
+ }
+
+ /**
+ * Set user limit factor - used only for testing.
+ * @param userLimitFactor new user limit factor
+ */
+ synchronized void setUserLimitFactor(int userLimitFactor) {
+ this.userLimitFactor = userLimitFactor;
+ }
+
+ synchronized void setParentQueue(Queue parent) {
+ this.parent = parent;
+ }
+
+ public synchronized int getNumApplications() {
+ return applications.size();
+ }
+
+ public synchronized int getNumContainers() {
+ return numContainers;
+ }
+
+ @Override
+ public synchronized QueueState getState() {
+ return state;
+ }
+
+ @Override
+ public synchronized Map<QueueACL, AccessControlList> getQueueAcls() {
+ return new HashMap<QueueACL, AccessControlList>(acls);
+ }
+
+ @Override
+ public synchronized QueueInfo getQueueInfo(
+ boolean includeChildQueues, boolean recursive) {
+ queueInfo.setCurrentCapacity(usedCapacity);
+ return queueInfo;
+ }
+
+ @Override
+ public synchronized List<QueueUserACLInfo>
+ getQueueUserAclInfo(UserGroupInformation user) {
+ QueueUserACLInfo userAclInfo =
+ recordFactory.newRecordInstance(QueueUserACLInfo.class);
+ List<QueueACL> operations = new ArrayList<QueueACL>();
+ for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
+ QueueACL operation = e.getKey();
+ AccessControlList acl = e.getValue();
+
+ if (acl.isUserAllowed(user)) {
+ operations.add(operation);
+ }
+ }
+
+ userAclInfo.setQueueName(getQueueName());
+ userAclInfo.setUserAcls(operations);
+ return Collections.singletonList(userAclInfo);
+ }
+
+ public String toString() {
+ return queueName + ":" + capacity + ":" + absoluteCapacity + ":" +
+ getUsedCapacity() + ":" + getUtilization() + ":" +
+ getNumApplications() + ":" + getNumContainers();
+ }
+
+ private synchronized User getUser(String userName) {
+ User user = users.get(userName);
+ if (user == null) {
+ user = new User();
+ users.put(userName, user);
+ }
+ return user;
+ }
+
+ @Override
+ public synchronized void reinitialize(Queue queue, Resource clusterResource)
+ throws IOException {
+ // Sanity check
+ if (!(queue instanceof LeafQueue) ||
+ !queue.getQueuePath().equals(getQueuePath())) {
+ throw new IOException("Trying to reinitialize " + getQueuePath() +
+ " from " + queue.getQueuePath());
+ }
+
+ LeafQueue leafQueue = (LeafQueue)queue;
+ setupQueueConfigs(leafQueue.capacity, leafQueue.absoluteCapacity,
+ leafQueue.maximumCapacity, leafQueue.absoluteMaxCapacity,
+ leafQueue.userLimit, leafQueue.userLimitFactor,
+ leafQueue.maxApplications, leafQueue.maxApplicationsPerUser,
+ leafQueue.state, leafQueue.acls);
+
+ updateResource(clusterResource);
+ }
+
+ @Override
+ public boolean hasAccess(QueueACL acl, UserGroupInformation user) {
+ // Check if the leaf-queue allows access
+ synchronized (this) {
+ if (acls.get(acl).isUserAllowed(user)) {
+ return true;
+ }
+ }
+
+ // Check if parent-queue allows access
+ return parent.hasAccess(acl, user);
+ }
+
+ @Override
+ public void submitApplication(SchedulerApp application, String userName,
+ String queue) throws AccessControlException {
+ // Careful! Locking order is important!
+
+ // Check queue ACLs
+ UserGroupInformation userUgi;
+ try {
+ userUgi = UserGroupInformation.getCurrentUser();
+ } catch (IOException ioe) {
+ throw new AccessControlException(ioe);
+ }
+ if (!hasAccess(QueueACL.SUBMIT_JOB, userUgi)) {
+ throw new AccessControlException("User " + userName + " cannot submit" +
+ " jobs to queue " + getQueuePath());
+ }
+
+ User user = null;
+ synchronized (this) {
+
+ // Check if the queue is accepting jobs
+ if (state != QueueState.RUNNING) {
+ String msg = "Queue " + getQueuePath() +
+ " is STOPPED. Cannot accept submission of application: " +
+ application.getApplicationId();
+ LOG.info(msg);
+ throw new AccessControlException(msg);
+ }
+
+ // Check submission limits for queues
+ if (getNumApplications() >= maxApplications) {
+ String msg = "Queue " + getQueuePath() +
+ " already has " + getNumApplications() + " applications," +
+ " cannot accept submission of application: " +
+ application.getApplicationId();
+ LOG.info(msg);
+ throw new AccessControlException(msg);
+ }
+
+ // Check submission limits for the user on this queue
+ user = getUser(userName);
+ if (user.getApplications() >= maxApplicationsPerUser) {
+ String msg = "Queue " + getQueuePath() +
+ " already has " + user.getApplications() +
+ " applications from user " + userName +
+ " cannot accept submission of application: " +
+ application.getApplicationId();
+ LOG.info(msg);
+ throw new AccessControlException(msg);
+ }
+
+ // Add the application to our data-structures
+ addApplication(application, user);
+ }
+
+ metrics.submitApp(userName);
+
+ // Inform the parent queue
+ try {
+ parent.submitApplication(application, userName, queue);
+ } catch (AccessControlException ace) {
+ LOG.info("Failed to submit application to parent-queue: " +
+ parent.getQueuePath(), ace);
+ removeApplication(application, user);
+ throw ace;
+ }
+ }
+
+ private synchronized void addApplication(SchedulerApp application, User user) {
+ // Accept
+ user.submitApplication();
+ applications.add(application);
+ applicationsMap.put(application.getApplicationAttemptId(), application);
+
+ LOG.info("Application added -" +
+ " appId: " + application.getApplicationId() +
+ " user: " + user + "," + " leaf-queue: " + getQueueName() +
+ " #user-applications: " + user.getApplications() +
+ " #queue-applications: " + getNumApplications());
+ }
+
+ @Override
+ public void finishApplication(SchedulerApp application, String queue) {
+ // Careful! Locking order is important!
+ synchronized (this) {
+ removeApplication(application, getUser(application.getUser()));
+ }
+
+ // Inform the parent queue
+ parent.finishApplication(application, queue);
+ }
+
+ public synchronized void removeApplication(SchedulerApp application, User user) {
+ applications.remove(application);
+ applicationsMap.remove(application.getApplicationAttemptId());
+
+ user.finishApplication();
+ if (user.getApplications() == 0) {
+ users.remove(application.getUser());
+ }
+
+ LOG.info("Application removed -" +
+ " appId: " + application.getApplicationId() +
+ " user: " + application.getUser() +
+ " queue: " + getQueueName() +
+ " #user-applications: " + user.getApplications() +
+ " #queue-applications: " + getNumApplications());
+ }
+
+ private synchronized SchedulerApp getApplication(
+ ApplicationAttemptId applicationAttemptId) {
+ return applicationsMap.get(applicationAttemptId);
+ }
+
+ @Override
+ public synchronized Resource
+ assignContainers(Resource clusterResource, SchedulerNode node) {
+
+ LOG.info("DEBUG --- assignContainers:" +
+ " node=" + node.getHostName() +
+ " #applications=" + applications.size());
+
+ // Check for reserved resources
+ RMContainer reservedContainer = node.getReservedContainer();
+ if (reservedContainer != null) {
+ SchedulerApp application =
+ getApplication(reservedContainer.getApplicationAttemptId());
+ return assignReservedContainer(application, node, reservedContainer,
+ clusterResource);
+ }
+
+ // Try to assign containers to applications in order
+ for (SchedulerApp application : applications) {
+
+ LOG.info("DEBUG --- pre-assignContainers for application "
+ + application.getApplicationId());
+ application.showRequests();
+
+ synchronized (application) {
+ Resource userLimit =
+ computeUserLimit(application, clusterResource, Resources.none());
+ setUserResourceLimit(application, userLimit);
+
+ for (Priority priority : application.getPriorities()) {
+
+ // Do we need containers at this 'priority'?
+ if (!needContainers(application, priority)) {
+ continue;
+ }
+
+ // Are we going over limits by allocating to this application?
+ ResourceRequest required =
+ application.getResourceRequest(priority, RMNode.ANY);
+
+ // Maximum Capacity of the queue
+ if (!assignToQueue(clusterResource, required.getCapability())) {
+ return Resources.none();
+ }
+
+ // User limits
+ userLimit =
+ computeUserLimit(application, clusterResource,
+ required.getCapability());
+ if (!assignToUser(application.getUser(), userLimit)) {
+ break;
+ }
+
+ // Inform the application it is about to get a scheduling opportunity
+ application.addSchedulingOpportunity(priority);
+
+ // Try to schedule
+ Resource assigned =
+ assignContainersOnNode(clusterResource, node, application, priority,
+ null);
+
+ // Did we schedule or reserve a container?
+ if (Resources.greaterThan(assigned, Resources.none())) {
+ Resource assignedResource =
+ application.getResourceRequest(priority, RMNode.ANY).getCapability();
+
+ // Book-keeping
+ allocateResource(clusterResource,
+ application.getUser(), assignedResource);
+
+ // Reset scheduling opportunities
+ application.resetSchedulingOpportunities(priority);
+
+ // Done
+ return assignedResource;
+ } else {
+ // Do not assign out of order w.r.t priorities
+ break;
+ }
+ }
+ }
+
+ LOG.info("DEBUG --- post-assignContainers for application "
+ + application.getApplicationId());
+ application.showRequests();
+ }
+
+ return Resources.none();
+
+ }
+
+ private synchronized Resource assignReservedContainer(SchedulerApp application,
+ SchedulerNode node, RMContainer rmContainer, Resource clusterResource) {
+ // Do we still need this reservation?
+ Priority priority = rmContainer.getReservedPriority();
+ if (application.getTotalRequiredResources(priority) == 0) {
+ // Release
+ Container container = rmContainer.getContainer();
+ completedContainer(clusterResource, application, node,
+ rmContainer, RMContainerEventType.RELEASED);
+ return container.getResource();
+ }
+
+ // Try to assign if we have sufficient resources
+ assignContainersOnNode(clusterResource, node, application, priority, rmContainer);
+
+ // Doesn't matter... since it's already charged for at time of reservation
+ // "re-reservation" is *free*
+ return org.apache.hadoop.yarn.server.resourcemanager.resource.Resource.NONE;
+ }
+
+ private synchronized boolean assignToQueue(Resource clusterResource,
+ Resource required) {
+ // Check how much of the cluster's absolute capacity we are currently using...
+ float potentialNewCapacity =
+ (float)(usedResources.getMemory() + required.getMemory()) /
+ clusterResource.getMemory();
+ if (potentialNewCapacity > absoluteMaxCapacity) {
+ LOG.info(getQueueName() +
+ " usedResources: " + usedResources.getMemory() +
+ " currentCapacity " + ((float)usedResources.getMemory())/clusterResource.getMemory() +
+ " required " + required.getMemory() +
+ " potentialNewCapacity: " + potentialNewCapacity + " ( " +
+ " > max-capacity (" + absoluteMaxCapacity + ")");
+ return false;
+ }
+ return true;
+ }
+
+ private void setUserResourceLimit(SchedulerApp application,
+ Resource resourceLimit) {
+ application.setAvailableResourceLimit(resourceLimit);
+ metrics.setAvailableResourcesToUser(application.getUser(), resourceLimit);
+ }
+
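+ // Round a memory demand up to the next multiple of the minimum allocation.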
+ private int roundUp(int memory) {
+ return divideAndCeil(memory, minimumAllocation.getMemory()) *
+ minimumAllocation.getMemory();
+ }
+
+ private Resource computeUserLimit(SchedulerApp application,
+ Resource clusterResource, Resource required) {
+ // What is our current capacity?
+ // * It is equal to the max(required, queue-capacity) if
+ // we're running below capacity. The 'max' ensures that jobs in queues
+ // with minuscule capacity (< 1 slot) make progress
+ // * If we're running over capacity, then it's
+ // (usedResources + required), i.e. the extra resources we are allocating
+
+ // Allow progress for queues with minuscule capacity
+ final int queueCapacity =
+ Math.max(
+ roundUp((int)(absoluteCapacity * clusterResource.getMemory())),
+ required.getMemory());
+
+ final int consumed = usedResources.getMemory();
+ final int currentCapacity =
+ (consumed < queueCapacity) ?
+ queueCapacity : (consumed + required.getMemory());
+
+ // Never allow a single user to take more than the
+ // queue's configured capacity * user-limit-factor.
+ // Also, the queue's configured capacity should be higher than
+ // queue-hard-limit * ulMin
+
+ String userName = application.getUser();
+
+ final int activeUsers = users.size();
+ User user = getUser(userName);
+
+ int limit =
+ roundUp(
+ Math.min(
+ Math.max(divideAndCeil(currentCapacity, activeUsers),
+ divideAndCeil((int)userLimit*currentCapacity, 100)),
+ (int)(queueCapacity * userLimitFactor)
+ )
+ );
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("User limit computation for " + userName +
+ " in queue " + getQueueName() +
+ " userLimit=" + userLimit +
+ " userLimitFactor=" + userLimitFactor +
+ " required: " + required +
+ " consumed: " + user.getConsumedResources() +
+ " limit: " + limit +
+ " queueCapacity: " + queueCapacity +
+ " qconsumed: " + consumed +
+ " currentCapacity: " + currentCapacity +
+ " activeUsers: " + activeUsers +
+ " clusterCapacity: " + clusterResource.getMemory()
+ );
+ }
+
+ return Resources.createResource(limit);
+ }
+
+ private synchronized boolean assignToUser(String userName, Resource limit) {
+
+ User user = getUser(userName);
+
+ // Note: We aren't considering the current request since there is a fixed
+ // overhead for the AM, and this is a strict '>' check, so a user at
+ // exactly the limit can still receive one more allocation.
+ if ((user.getConsumedResources().getMemory()) > limit.getMemory()) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("User " + userName + " in queue " + getQueueName() +
+ " will exceed limit - " +
+ " consumed: " + user.getConsumedResources() +
+ " limit: " + limit
+ );
+ }
+ return false;
+ }
+
+ return true;
+ }
+
+ private static int divideAndCeil(int a, int b) {
+ if (b == 0) {
+ LOG.info("divideAndCeil called with a=" + a + " b=" + b);
+ return 0;
+ }
+ return (a + (b - 1)) / b;
+ }
+
+ boolean needContainers(SchedulerApp application, Priority priority) {
+ int requiredContainers = application.getTotalRequiredResources(priority);
+ int reservedContainers = application.getNumReservedContainers(priority);
+ return ((requiredContainers - reservedContainers) > 0);
+ }
+
+ private Resource assignContainersOnNode(Resource clusterResource,
+ SchedulerNode node, SchedulerApp application,
+ Priority priority, RMContainer reservedContainer) {
+
+ Resource assigned = Resources.none();
+
+ // Data-local
+ assigned =
+ assignNodeLocalContainers(clusterResource, node, application, priority,
+ reservedContainer);
+ if (Resources.greaterThan(assigned, Resources.none())) {
+ return assigned;
+ }
+
+ // Rack-local
+ assigned =
+ assignRackLocalContainers(clusterResource, node, application, priority,
+ reservedContainer);
+ if (Resources.greaterThan(assigned, Resources.none())) {
+ return assigned;
+ }
+
+ // Off-switch
+ return assignOffSwitchContainers(clusterResource, node, application,
+ priority, reservedContainer);
+ }
+
+ private Resource assignNodeLocalContainers(Resource clusterResource,
+ SchedulerNode node, SchedulerApp application,
+ Priority priority, RMContainer reservedContainer) {
+ ResourceRequest request =
+ application.getResourceRequest(priority, node.getHostName());
+ if (request != null) {
+ if (canAssign(application, priority, node, NodeType.NODE_LOCAL,
+ reservedContainer)) {
+ return assignContainer(clusterResource, node, application, priority,
+ request, NodeType.NODE_LOCAL, reservedContainer);
+ }
+ }
+
+ return Resources.none();
+ }
+
+ private Resource assignRackLocalContainers(Resource clusterResource,
+ SchedulerNode node, SchedulerApp application, Priority priority,
+ RMContainer reservedContainer) {
+ ResourceRequest request =
+ application.getResourceRequest(priority, node.getRackName());
+ if (request != null) {
+ if (canAssign(application, priority, node, NodeType.RACK_LOCAL,
+ reservedContainer)) {
+ return assignContainer(clusterResource, node, application, priority, request,
+ NodeType.RACK_LOCAL, reservedContainer);
+ }
+ }
+ return Resources.none();
+ }
+
+ private Resource assignOffSwitchContainers(Resource clusterResource, SchedulerNode node,
+ SchedulerApp application, Priority priority,
+ RMContainer reservedContainer) {
+ ResourceRequest request =
+ application.getResourceRequest(priority, RMNode.ANY);
+ if (request != null) {
+ if (canAssign(application, priority, node, NodeType.OFF_SWITCH,
+ reservedContainer)) {
+ return assignContainer(clusterResource, node, application, priority, request,
+ NodeType.OFF_SWITCH, reservedContainer);
+ }
+ }
+
+ return Resources.none();
+ }
+
+ boolean canAssign(SchedulerApp application, Priority priority,
+ SchedulerNode node, NodeType type, RMContainer reservedContainer) {
+
+ // Reserved...
+ if (reservedContainer != null) {
+ return true;
+ }
+
+ // Clearly we need containers for this application...
+ if (type == NodeType.OFF_SWITCH) {
+ // 'Delay' off-switch
+ ResourceRequest offSwitchRequest =
+ application.getResourceRequest(priority, RMNode.ANY);
+ long missedOpportunities = application.getSchedulingOpportunities(priority);
+ long requiredContainers = offSwitchRequest.getNumContainers();
+
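+ // Delay scheduling: only allow an off-switch assignment once the app has
+ // missed more scheduling opportunities than requiredContainers scaled by
+ // its locality-wait factor.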
+ float localityWaitFactor =
+ application.getLocalityWaitFactor(priority,
+ scheduler.getNumClusterNodes());
+
+ return ((requiredContainers * localityWaitFactor) < missedOpportunities);
+ }
+
+ // Check if we need containers on this rack
+ ResourceRequest rackLocalRequest =
+ application.getResourceRequest(priority, node.getRackName());
+ if (type == NodeType.RACK_LOCAL) {
+ if (rackLocalRequest == null) {
+ return false;
+ } else {
+ return rackLocalRequest.getNumContainers() > 0;
+ }
+ }
+
+ // Check if we need containers on this host
+ if (type == NodeType.NODE_LOCAL) {
+ // First: Do we need containers on this rack?
+ if (rackLocalRequest != null && rackLocalRequest.getNumContainers() == 0) {
+ return false;
+ }
+
+ // Now check if we need containers on this host...
+ ResourceRequest nodeLocalRequest =
+ application.getResourceRequest(priority, node.getHostName());
+ if (nodeLocalRequest != null) {
+ return nodeLocalRequest.getNumContainers() > 0;
+ }
+ }
+
+ return false;
+ }
+
+ private Container getContainer(RMContainer rmContainer,
+ SchedulerApp application, SchedulerNode node, Resource capability) {
+ return (rmContainer != null) ? rmContainer.getContainer() :
+ createContainer(application, node, capability);
+ }
+
+ public Container createContainer(SchedulerApp application, SchedulerNode node,
+ Resource capability) {
+ Container container =
+ BuilderUtils.newContainer(this.recordFactory,
+ application.getApplicationAttemptId(),
+ application.getNewContainerId(),
+ node.getNodeID(),
+ node.getHttpAddress(), capability);
+
+ // If security is enabled, send the container-tokens too.
+ if (UserGroupInformation.isSecurityEnabled()) {
+ ContainerToken containerToken =
+ this.recordFactory.newRecordInstance(ContainerToken.class);
+ ContainerTokenIdentifier tokenidentifier =
+ new ContainerTokenIdentifier(container.getId(),
+ container.getNodeId().toString(), container.getResource());
+ containerToken.setIdentifier(
+ ByteBuffer.wrap(tokenidentifier.getBytes()));
+ containerToken.setKind(ContainerTokenIdentifier.KIND.toString());
+ containerToken.setPassword(
+ ByteBuffer.wrap(
+ containerTokenSecretManager.createPassword(tokenidentifier))
+ );
+ containerToken.setService(container.getNodeId().toString());
+ container.setContainerToken(containerToken);
+ }
+
+ return container;
+ }
+
+ private Resource assignContainer(Resource clusterResource, SchedulerNode node,
+ SchedulerApp application, Priority priority,
+ ResourceRequest request, NodeType type, RMContainer rmContainer) {
+ if (LOG.isDebugEnabled()) {
+ LOG.info("DEBUG --- assignContainers:" +
+ " node=" + node.getHostName() +
+ " application=" + application.getApplicationId().getId() +
+ " priority=" + priority.getPriority() +
+ " request=" + request + " type=" + type);
+ }
+ Resource capability = request.getCapability();
+
+ Resource available = node.getAvailableResource();
+
+ assert (available.getMemory() > 0);
+
+ // Create the container if necessary
+ Container container =
+ getContainer(rmContainer, application, node, capability);
+
+ // Can we allocate a container on this node?
+ int availableContainers =
+ available.getMemory() / capability.getMemory();
+ if (availableContainers > 0) {
+ // Allocate...
+
+ // Did we previously reserve containers at this 'priority'?
+ if (rmContainer != null){
+ unreserve(application, priority, node, rmContainer);
+ }
+
+ // Inform the application
+ RMContainer allocatedContainer =
+ application.allocate(type, node, priority, request, container);
+ if (allocatedContainer == null) {
+ // Did the application need this resource?
+ return Resources.none();
+ }
+
+ // Inform the node
+ node.allocateContainer(application.getApplicationId(),
+ allocatedContainer);
+
+ LOG.info("assignedContainer" +
+ " application=" + application.getApplicationId() +
+ " container=" + container +
+ " containerId=" + container.getId() +
+ " queue=" + this +
+ " util=" + getUtilization() +
+ " used=" + usedResources +
+ " cluster=" + clusterResource);
+
+ return container.getResource();
+ } else {
+ // Reserve by 'charging' in advance...
+ reserve(application, priority, node, rmContainer, container);
+
+ LOG.info("Reserved container " +
+ " application=" + application.getApplicationId() +
+ " resource=" + request.getCapability() +
+ " queue=" + this.toString() +
+ " util=" + getUtilization() +
+ " used=" + usedResources +
+ " cluster=" + clusterResource);
+
+ return request.getCapability();
+ }
+ }
+
+ private void reserve(SchedulerApp application, Priority priority,
+ SchedulerNode node, RMContainer rmContainer, Container container) {
+ rmContainer = application.reserve(node, priority, rmContainer, container);
+ node.reserveResource(application, priority, rmContainer);
+
+ // Update reserved metrics if this is the first reservation
+ if (rmContainer == null) {
+ getMetrics().reserveResource(
+ application.getUser(), container.getResource());
+ }
+ }
+
+ private void unreserve(SchedulerApp application, Priority priority,
+ SchedulerNode node, RMContainer rmContainer) {
+ // Done with the reservation?
+ application.unreserve(node, priority);
+ node.unreserveResource(application);
+
+ // Update reserved metrics
+ getMetrics().unreserveResource(
+ application.getUser(), rmContainer.getContainer().getResource());
+ }
+
+
+ @Override
+ public void completedContainer(Resource clusterResource,
+ SchedulerApp application, SchedulerNode node, RMContainer rmContainer,
+ RMContainerEventType event) {
+ if (application != null) {
+ // Careful! Locking order is important!
+ synchronized (this) {
+
+ Container container = rmContainer.getContainer();
+
+ // Inform the application & the node
+ // Note: It's safe to assume that all state changes to RMContainer
+ // happen under scheduler's lock...
+ // So, this is, in effect, a transaction across application & node
+ if (rmContainer.getState() == RMContainerState.RESERVED) {
+ application.unreserve(node, rmContainer.getReservedPriority());
+ node.unreserveResource(application);
+ } else {
+ application.containerCompleted(rmContainer, event);
+ node.releaseContainer(container);
+ }
+
+
+ // Book-keeping
+ releaseResource(clusterResource,
+ application.getUser(), container.getResource());
+
+ LOG.info("completedContainer" +
+ " container=" + container +
+ " resource=" + container.getResource() +
+ " queue=" + this +
+ " util=" + getUtilization() +
+ " used=" + usedResources +
+ " cluster=" + clusterResource);
+ }
+
+ // Inform the parent queue
+ parent.completedContainer(clusterResource, application,
+ node, rmContainer, event);
+ }
+ }
+
+ synchronized void allocateResource(Resource clusterResource,
+ String userName, Resource resource) {
+ // Update queue metrics
+ Resources.addTo(usedResources, resource);
+ updateResource(clusterResource);
+ ++numContainers;
+
+ // Update user metrics
+ User user = getUser(userName);
+ user.assignContainer(resource);
+
+ LOG.info(getQueueName() +
+ " used=" + usedResources + " numContainers=" + numContainers +
+ " user=" + userName + " resources=" + user.getConsumedResources());
+ }
+
+ synchronized void releaseResource(Resource clusterResource,
+ String userName, Resource resource) {
+ // Update queue metrics
+ Resources.subtractFrom(usedResources, resource);
+ updateResource(clusterResource);
+ --numContainers;
+
+ // Update user metrics
+ User user = getUser(userName);
+ user.releaseContainer(resource);
+
+ LOG.info(getQueueName() +
+ " used=" + usedResources + " numContainers=" + numContainers +
+ " user=" + userName + " resources=" + user.getConsumedResources());
+ }
+
+ @Override
+ public synchronized void updateResource(Resource clusterResource) {
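+ // Utilization is usage relative to the queue's absolute (cluster-wide)
+ // capacity; used-capacity is usage relative to the configured capacity
+ // fraction applied to the whole cluster.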
+ float queueLimit = clusterResource.getMemory() * absoluteCapacity;
+ setUtilization(usedResources.getMemory() / queueLimit);
+ setUsedCapacity(
+ usedResources.getMemory() / (clusterResource.getMemory() * capacity));
+
+ Resource resourceLimit =
+ Resources.createResource((int)queueLimit);
+ metrics.setAvailableResourcesToQueue(
+ Resources.subtractFrom(resourceLimit, usedResources));
+ }
+
+ @Override
+ public QueueMetrics getMetrics() {
+ return metrics;
+ }
+
+ static class User {
+ Resource consumed = Resources.createResource(0);
+ int applications = 0;
+
+ public Resource getConsumedResources() {
+ return consumed;
+ }
+
+ public int getApplications() {
+ return applications;
+ }
+
+ public synchronized void submitApplication() {
+ ++applications;
+ }
+
+ public synchronized void finishApplication() {
+ --applications;
+ }
+
+ public synchronized void assignContainer(Resource resource) {
+ Resources.addTo(consumed, resource);
+ }
+
+ public synchronized void releaseContainer(Resource resource) {
+ Resources.subtractFrom(consumed, resource);
+ }
+ }
+
+ @Override
+ public void recoverContainer(Resource clusterResource,
+ SchedulerApp application, Container container) {
+ // Careful! Locking order is important!
+ synchronized (this) {
+ allocateResource(clusterResource, application.getUser(), container.getResource());
+ }
+ parent.recoverContainer(clusterResource, application, container);
+
+ }
+}
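To make the user-limit arithmetic above concrete, here is the same formula restated standalone with toy numbers; divideAndCeil/roundUp mirror the private helpers in LeafQueue, and everything else is hypothetical:

    static int divideAndCeil(int a, int b) { return (a + b - 1) / b; }
    static int roundUp(int mem, int min) { return divideAndCeil(mem, min) * min; }

    // 100 GB cluster, queue guaranteed 10%, 1 GB minimum allocation,
    // user-limit 100 (percent), user-limit-factor 1.0, two active users.
    int clusterMemory = 100 * 1024, minAllocation = 1024;
    float absoluteCapacity = 0.1f, userLimitFactor = 1.0f;
    int userLimit = 100, activeUsers = 2;
    int consumed = 8 * 1024, required = 1024; // queue usage and the current ask

    int queueCapacity = Math.max(
        roundUp((int) (absoluteCapacity * clusterMemory), minAllocation),
        required);                                                // 10240
    int currentCapacity =
        (consumed < queueCapacity) ? queueCapacity
                                   : consumed + required;         // 10240
    int limit = roundUp(
        Math.min(
            Math.max(divideAndCeil(currentCapacity, activeUsers), // 5120
                     divideAndCeil(userLimit * currentCapacity, 100)),
            (int) (queueCapacity * userLimitFactor)),
        minAllocation);                                           // 10240
    // At user-limit 100 each user may grow to the whole queue; at user-limit
    // 50 the same numbers settle at 5120 (5 GB) per user.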
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
new file mode 100644
index 0000000..7aa37fc
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -0,0 +1,678 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+
+@Private
+@Evolving
+public class ParentQueue implements Queue {
+
+ private static final Log LOG = LogFactory.getLog(ParentQueue.class);
+
+ private final Queue parent;
+ private final String queueName;
+
+ private float capacity;
+ private float maximumCapacity;
+ private float absoluteCapacity;
+ private float absoluteMaxCapacity;
+
+ private float usedCapacity = 0.0f;
+ private float utilization = 0.0f;
+
+ private final Set<Queue> childQueues;
+ private final Comparator<Queue> queueComparator;
+
+ private Resource usedResources =
+ Resources.createResource(0);
+
+ private final boolean rootQueue;
+
+ private final Resource minimumAllocation;
+
+ private volatile int numApplications;
+ private volatile int numContainers;
+
+ private QueueState state;
+
+ private final QueueMetrics metrics;
+
+ private QueueInfo queueInfo;
+
+ private Map<QueueACL, AccessControlList> acls =
+ new HashMap<QueueACL, AccessControlList>();
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ public ParentQueue(CapacitySchedulerContext cs,
+ String queueName, Comparator<Queue> comparator, Queue parent, Queue old) {
+ minimumAllocation = cs.getMinimumResourceCapability();
+
+ this.parent = parent;
+ this.queueName = queueName;
+ this.rootQueue = (parent == null);
+
+ // must be called after parent and queueName is set
+ this.metrics = old != null ? old.getMetrics() :
+ QueueMetrics.forQueue(getQueuePath(), parent,
+ cs.getConfiguration().getEnableUserMetrics());
+
+ int rawCapacity = cs.getConfiguration().getCapacity(getQueuePath());
+
+ if (rootQueue &&
+ (rawCapacity != CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE)) {
+ throw new IllegalArgumentException("Illegal " +
+ "capacity of " + rawCapacity + " for queue " + queueName +
+ ". Must be " + CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE);
+ }
+
+ float capacity = (float) rawCapacity / 100;
+
+ float parentAbsoluteCapacity =
+ (parent == null) ? 1.0f : parent.getAbsoluteCapacity();
+ float absoluteCapacity = parentAbsoluteCapacity * capacity;
+
+ float maximumCapacity =
+ cs.getConfiguration().getMaximumCapacity(getQueuePath());
+ float absoluteMaxCapacity =
+ (maximumCapacity == CapacitySchedulerConfiguration.UNDEFINED) ?
+ 1000000000f : (parentAbsoluteCapacity * maximumCapacity) / 100;
+
+ QueueState state = cs.getConfiguration().getState(getQueuePath());
+
+ Map<QueueACL, AccessControlList> acls =
+ cs.getConfiguration().getAcls(getQueuePath());
+
+ this.queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
+ this.queueInfo.setQueueName(queueName);
+ this.queueInfo.setChildQueues(new ArrayList<QueueInfo>());
+
+ setupQueueConfigs(capacity, absoluteCapacity,
+ maximumCapacity, absoluteMaxCapacity, state, acls);
+
+ this.queueComparator = comparator;
+ this.childQueues = new TreeSet<Queue>(comparator);
+
+ LOG.info("Initialized parent-queue " + queueName +
+ " name=" + queueName +
+ ", fullname=" + getQueuePath());
+ }
+
+ private synchronized void setupQueueConfigs(
+ float capacity, float absoluteCapacity,
+ float maximumCapacity, float absoluteMaxCapacity,
+ QueueState state, Map<QueueACL, AccessControlList> acls
+ ) {
+ this.capacity = capacity;
+ this.absoluteCapacity = absoluteCapacity;
+ this.maximumCapacity = maximumCapacity;
+ this.absoluteMaxCapacity = absoluteMaxCapacity;
+
+ this.state = state;
+
+ this.acls = acls;
+
+ this.queueInfo.setCapacity(capacity);
+ this.queueInfo.setMaximumCapacity(maximumCapacity);
+ this.queueInfo.setQueueState(state);
+
+ StringBuilder aclsString = new StringBuilder();
+ for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
+ aclsString.append(e.getKey() + ":" + e.getValue().getAclString());
+ }
+
+ LOG.info(queueName +
+ ", capacity=" + capacity +
+ ", asboluteCapacity=" + absoluteCapacity +
+ ", maxCapacity=" + maximumCapacity +
+ ", asboluteMaxCapacity=" + absoluteMaxCapacity +
+ ", state=" + state +
+ ", acls=" + aclsString);
+ }
+
+ private static final float PRECISION = 0.005f; // 0.5% tolerance on the capacity sum
+ void setChildQueues(Collection<Queue> childQueues) {
+
+ // Validate
+ float childCapacities = 0;
+ for (Queue queue : childQueues) {
+ childCapacities += queue.getCapacity();
+ }
+ float delta = Math.abs(1.0f - childCapacities); // crude way to check
+ if (delta > PRECISION) {
+ throw new IllegalArgumentException("Illegal" +
+ " capacity of " + childCapacities +
+ " for children of queue " + queueName);
+ }
+
+ this.childQueues.clear();
+ this.childQueues.addAll(childQueues);
+ LOG.info("DEBUG --- setChildQueues: " + getChildQueuesToPrint());
+ }
+
+ @Override
+ public Queue getParent() {
+ return parent;
+ }
+
+ @Override
+ public String getQueueName() {
+ return queueName;
+ }
+
+ @Override
+ public String getQueuePath() {
+ String parentPath = ((parent == null) ? "" : (parent.getQueuePath() + "."));
+ return parentPath + getQueueName();
+ }
+
+ @Override
+ public synchronized float getCapacity() {
+ return capacity;
+ }
+
+ @Override
+ public synchronized float getAbsoluteCapacity() {
+ return absoluteCapacity;
+ }
+
+ @Override
+ public float getAbsoluteMaximumCapacity() {
+ return 0;
+ }
+
+ @Override
+ public float getMaximumCapacity() {
+ return 0;
+ }
+
+ @Override
+ public synchronized float getUsedCapacity() {
+ return usedCapacity;
+ }
+
+ @Override
+ public synchronized Resource getUsedResources() {
+ return usedResources;
+ }
+
+ @Override
+ public synchronized float getUtilization() {
+ return utilization;
+ }
+
+ @Override
+ public synchronized List<Queue> getChildQueues() {
+ return new ArrayList<Queue>(childQueues);
+ }
+
+ public synchronized int getNumContainers() {
+ return numContainers;
+ }
+
+ public synchronized int getNumApplications() {
+ return numApplications;
+ }
+
+ @Override
+ public synchronized QueueState getState() {
+ return state;
+ }
+
+ @Override
+ public synchronized Map<QueueACL, AccessControlList> getQueueAcls() {
+ return new HashMap<QueueACL, AccessControlList>(acls);
+ }
+
+ @Override
+ public synchronized QueueInfo getQueueInfo(
+ boolean includeChildQueues, boolean recursive) {
+ queueInfo.setCurrentCapacity(usedCapacity);
+
+ List<QueueInfo> childQueuesInfo = new ArrayList<QueueInfo>();
+ if (includeChildQueues) {
+ for (Queue child : childQueues) {
+ // Get queue information recursively?
+ childQueuesInfo.add(
+ child.getQueueInfo(recursive, recursive));
+ }
+ }
+ queueInfo.setChildQueues(childQueuesInfo);
+
+ return queueInfo;
+ }
+
+ private synchronized QueueUserACLInfo getUserAclInfo(
+ UserGroupInformation user) {
+ QueueUserACLInfo userAclInfo =
+ recordFactory.newRecordInstance(QueueUserACLInfo.class);
+ List<QueueACL> operations = new ArrayList<QueueACL>();
+ for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
+ QueueACL operation = e.getKey();
+ AccessControlList acl = e.getValue();
+
+ if (acl.isUserAllowed(user)) {
+ operations.add(operation);
+ }
+ }
+
+ userAclInfo.setQueueName(getQueueName());
+ userAclInfo.setUserAcls(operations);
+ return userAclInfo;
+ }
+
+ @Override
+ public synchronized List<QueueUserACLInfo> getQueueUserAclInfo(
+ UserGroupInformation user) {
+ List<QueueUserACLInfo> userAcls = new ArrayList<QueueUserACLInfo>();
+
+ // Add parent queue acls
+ userAcls.add(getUserAclInfo(user));
+
+ // Add children queue acls
+ for (Queue child : childQueues) {
+ userAcls.addAll(child.getQueueUserAclInfo(user));
+ }
+ return userAcls;
+ }
+
+ public String toString() {
+ return queueName + ":" + capacity + ":" + absoluteCapacity + ":" +
+ getUsedCapacity() + ":" + getUtilization() + ":" +
+ getNumApplications() + ":" + getNumContainers() + ":" +
+ childQueues.size() + " child-queues";
+ }
+
+ @Override
+ public synchronized void reinitialize(Queue queue, Resource clusterResource)
+ throws IOException {
+ // Sanity check
+ if (!(queue instanceof ParentQueue) ||
+ !queue.getQueuePath().equals(getQueuePath())) {
+ throw new IOException("Trying to reinitialize " + getQueuePath() +
+ " from " + queue.getQueuePath());
+ }
+
+ ParentQueue parentQueue = (ParentQueue)queue;
+
+ // Re-configure existing child queues and add new ones
+ // The CS has already checked to ensure all existing child queues are present!
+ Map<String, Queue> currentChildQueues = getQueues(childQueues);
+ Map<String, Queue> newChildQueues = getQueues(parentQueue.childQueues);
+ for (Map.Entry<String, Queue> e : newChildQueues.entrySet()) {
+ String newChildQueueName = e.getKey();
+ Queue newChildQueue = e.getValue();
+
+ Queue childQueue = currentChildQueues.get(newChildQueueName);
+ if (childQueue != null){
+ childQueue.reinitialize(newChildQueue, clusterResource);
+ LOG.info(getQueueName() + ": re-configured queue: " + childQueue);
+ } else {
+ currentChildQueues.put(newChildQueueName, newChildQueue);
+ LOG.info(getQueueName() + ": added new child queue: " + newChildQueue);
+ }
+ }
+
+ // Re-sort all queues
+ childQueues.clear();
+ childQueues.addAll(currentChildQueues.values());
+
+ // Set new configs
+ setupQueueConfigs(parentQueue.capacity, parentQueue.absoluteCapacity,
+ parentQueue.maximumCapacity, parentQueue.absoluteMaxCapacity,
+ parentQueue.state, parentQueue.acls);
+
+ // Update
+ updateResource(clusterResource);
+ }
+
+ Map<String, Queue> getQueues(Set<Queue> queues) {
+ Map<String, Queue> queuesMap = new HashMap<String, Queue>();
+ for (Queue queue : queues) {
+ queuesMap.put(queue.getQueueName(), queue);
+ }
+ return queuesMap;
+ }
+
+ @Override
+ public boolean hasAccess(QueueACL acl, UserGroupInformation user) {
+ synchronized (this) {
+ if (acls.get(acl).isUserAllowed(user)) {
+ return true;
+ }
+ }
+
+ if (parent != null) {
+ return parent.hasAccess(acl, user);
+ }
+
+ return false;
+ }
+
+ @Override
+ public void submitApplication(SchedulerApp application, String user,
+ String queue) throws AccessControlException {
+
+ synchronized (this) {
+ // Sanity check
+ if (queue.equals(queueName)) {
+ throw new AccessControlException("Cannot submit application " +
+ "to non-leaf queue: " + queueName);
+ }
+
+ if (state != QueueState.RUNNING) {
+ throw new AccessControlException("Queue " + getQueuePath() +
+ " is STOPPED. Cannot accept submission of application: " +
+ application.getApplicationId());
+ }
+
+ addApplication(application, user);
+ }
+
+ // Inform the parent queue
+ if (parent != null) {
+ try {
+ parent.submitApplication(application, user, queue);
+ } catch (AccessControlException ace) {
+ LOG.info("Failed to submit application to parent-queue: " +
+ parent.getQueuePath(), ace);
+ removeApplication(application, user);
+ throw ace;
+ }
+ }
+ }
+
+ private synchronized void addApplication(SchedulerApp application,
+ String user) {
+
+ ++numApplications;
+
+ LOG.info("Application added -" +
+ " appId: " + application.getApplicationId() +
+ " user: " + user +
+ " leaf-queue of parent: " + getQueueName() +
+ " #applications: " + getNumApplications());
+ }
+
+ @Override
+ public void finishApplication(SchedulerApp application, String queue) {
+
+ synchronized (this) {
+ removeApplication(application, application.getUser());
+ }
+
+ // Inform the parent queue
+ if (parent != null) {
+ parent.finishApplication(application, queue);
+ }
+ }
+
+ public synchronized void removeApplication(SchedulerApp application,
+ String user) {
+
+ --numApplications;
+
+ LOG.info("Application removed -" +
+ " appId: " + application.getApplicationId() +
+ " user: " + user +
+ " leaf-queue of parent: " + getQueueName() +
+ " #applications: " + getNumApplications());
+ }
+
+ synchronized void setUsedCapacity(float usedCapacity) {
+ this.usedCapacity = usedCapacity;
+ }
+
+ synchronized void setUtilization(float utilization) {
+ this.utilization = utilization;
+ }
+
+ /**
+ * Set maximum capacity - used only for testing.
+ * @param maximumCapacity new max capacity
+ */
+ synchronized void setMaxCapacity(float maximumCapacity) {
+ this.maximumCapacity = maximumCapacity;
+ float parentAbsoluteCapacity =
+ (rootQueue) ? 100.0f : parent.getAbsoluteCapacity();
+ this.absoluteMaxCapacity =
+ (maximumCapacity == CapacitySchedulerConfiguration.UNDEFINED) ?
+ Float.MAX_VALUE :
+ (parentAbsoluteCapacity * maximumCapacity);
+ }
+
+ @Override
+ public synchronized Resource assignContainers(
+ Resource clusterResource, SchedulerNode node) {
+ Resource assigned = Resources.createResource(0);
+
+ while (canAssign(node)) {
+ LOG.info("DEBUG --- Trying to assign containers to child-queue of " +
+ getQueueName());
+
+ // Are we over maximum-capacity for this queue?
+ if (!assignToQueue(clusterResource)) {
+ break;
+ }
+
+ // Schedule
+ Resource assignedToChild =
+ assignContainersToChildQueues(clusterResource, node);
+
+ // If a child-queue assigned something, do the book-keeping; else we're done
+ if (Resources.greaterThan(assignedToChild, Resources.none())) {
+ // Track resource utilization for the parent-queue
+ allocateResource(clusterResource, assignedToChild);
+
+ // Track resource utilization in this pass of the scheduler
+ Resources.addTo(assigned, assignedToChild);
+
+ LOG.info("assignedContainer" +
+ " queue=" + getQueueName() +
+ " util=" + getUtilization() +
+ " used=" + usedResources +
+ " cluster=" + clusterResource);
+
+ } else {
+ break;
+ }
+
+ LOG.info("DEBUG ---" +
+ " parentQ=" + getQueueName() +
+ " assignedSoFarInThisIteration=" + assigned +
+ " utilization=" + getUtilization());
+
+ // Do not assign more than one container if this isn't the root queue
+ if (!rootQueue) {
+ break;
+ }
+ }
+
+ return assigned;
+ }
+
+ private synchronized boolean assignToQueue(Resource clusterResource) {
+ // Check how much of the cluster's absolute capacity we are currently using...
+ float currentCapacity =
+ (float)(usedResources.getMemory()) / clusterResource.getMemory();
+ if (currentCapacity >= absoluteMaxCapacity) {
+ LOG.info(getQueueName() +
+ " used=" + usedResources.getMemory() +
+ " current-capacity (" + currentCapacity + ") " +
+ " >= max-capacity (" + absoluteMaxCapacity + ")");
+ return false;
+ }
+ return true;
+
+ }
+
+ private boolean canAssign(SchedulerNode node) {
+ return (node.getReservedContainer() == null) &&
+ Resources.greaterThanOrEqual(node.getAvailableResource(),
+ minimumAllocation);
+ }
+
+ synchronized Resource assignContainersToChildQueues(Resource cluster,
+ SchedulerNode node) {
+ Resource assigned = Resources.createResource(0);
+
+ printChildQueues();
+
+ // Try to assign to most 'under-served' sub-queue
+ for (Iterator<Queue> iter=childQueues.iterator(); iter.hasNext();) {
+ Queue childQueue = iter.next();
+ LOG.info("DEBUG --- Trying to assign to" +
+ " queue: " + childQueue.getQueuePath() +
+ " stats: " + childQueue);
+ assigned = childQueue.assignContainers(cluster, node);
+ LOG.info("DEBUG --- Assignedto" +
+ " queue: " + childQueue.getQueuePath() +
+ " stats: " + childQueue + " --> " + assigned.getMemory());
+
+ // If we do assign, remove the queue and re-insert in-order to re-sort
+ if (Resources.greaterThan(assigned, Resources.none())) {
+ // Remove and re-insert to sort
+ iter.remove();
+ LOG.info("Re-sorting queues since queue: " + childQueue.getQueuePath() +
+ " stats: " + childQueue);
+ childQueues.add(childQueue);
+ printChildQueues();
+ break;
+ }
+ }
+
+ return assigned;
+ }
+
+ String getChildQueuesToPrint() {
+ StringBuilder sb = new StringBuilder();
+ for (Queue q : childQueues) {
+ sb.append(q.getQueuePath() + "(" + q.getUtilization() + "), ");
+ }
+ return sb.toString();
+ }
+ void printChildQueues() {
+ LOG.info("DEBUG --- printChildQueues - queue: " + getQueuePath() +
+ " child-queues: " + getChildQueuesToPrint());
+ }
+
+ @Override
+ public void completedContainer(Resource clusterResource,
+ SchedulerApp application, SchedulerNode node,
+ RMContainer rmContainer, RMContainerEventType event) {
+ if (application != null) {
+ // Careful! Locking order is important!
+ // Book keeping
+ synchronized (this) {
+ releaseResource(clusterResource,
+ rmContainer.getContainer().getResource());
+
+ LOG.info("completedContainer" +
+ " queue=" + getQueueName() +
+ " util=" + getUtilization() +
+ " used=" + usedResources +
+ " cluster=" + clusterResource);
+ }
+
+ // Inform the parent
+ if (parent != null) {
+ parent.completedContainer(clusterResource, application,
+ node, rmContainer, event);
+ }
+ }
+ }
+
+ synchronized void allocateResource(Resource clusterResource,
+ Resource resource) {
+ Resources.addTo(usedResources, resource);
+ updateResource(clusterResource);
+ ++numContainers;
+ }
+
+ synchronized void releaseResource(Resource clusterResource,
+ Resource resource) {
+ Resources.subtractFrom(usedResources, resource);
+ updateResource(clusterResource);
+ --numContainers;
+ }
+
+ @Override
+ public synchronized void updateResource(Resource clusterResource) {
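+ // Utilization is used memory over the queue's absolute-capacity share of
+ // the cluster: e.g. 3072MB used against a 0.25 share of a 16384MB cluster
+ // gives a queue limit of 4096MB and a utilization of 0.75.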
+ float queueLimit = clusterResource.getMemory() * absoluteCapacity;
+ setUtilization(usedResources.getMemory() / queueLimit);
+ setUsedCapacity(
+ usedResources.getMemory() / (clusterResource.getMemory() * capacity));
+
+ Resource resourceLimit =
+ Resources.createResource((int)queueLimit);
+ metrics.setAvailableResourcesToQueue(
+ Resources.subtractFrom(resourceLimit, usedResources));
+ }
+
+ @Override
+ public QueueMetrics getMetrics() {
+ return metrics;
+ }
+
+
+ @Override
+ public void recoverContainer(Resource clusterResource,
+ SchedulerApp application, Container container) {
+ // Careful! Locking order is important!
+ synchronized (this) {
+ allocateResource(clusterResource, container.getResource());
+ }
+ if (parent != null) {
+ parent.recoverContainer(clusterResource, application, container);
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java
new file mode 100644
index 0000000..4bd486e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java
@@ -0,0 +1,204 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+
+/**
+ * Queue represents a node in the tree of
+ * hierarchical queues in the {@link CapacityScheduler}.
+ */
+@Stable
+@Private
+public interface Queue
+extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
+ /**
+ * Get the parent <code>Queue</code>.
+ * @return the parent queue
+ */
+ public Queue getParent();
+
+ /**
+ * Get the queue name.
+ * @return the queue name
+ */
+ public String getQueueName();
+
+ /**
+ * Get the full name of the queue, including the hierarchy.
+ * @return the full name of the queue
+ */
+ public String getQueuePath();
+
+ /**
+ * Get the configured <em>capacity</em> of the queue.
+ * @return queue capacity
+ */
+ public float getCapacity();
+
+ /**
+ * Get the capacity of the queue as a fraction of the
+ * cumulative capacity of the cluster.
+ * @return capacity of the queue as a fraction of the
+ * cumulative capacity of the cluster
+ */
+ public float getAbsoluteCapacity();
+
+ /**
+ * Get the configured maximum-capacity of the queue.
+ * @return the configured maximum-capacity of the queue
+ */
+ public float getMaximumCapacity();
+
+ /**
+ * Get maximum-capacity of the queue as a function of the cumulative capacity
+ * of the cluster.
+ * @return maximum-capacity of the queue as a function of the cumulative capacity
+ * of the cluster
+ */
+ public float getAbsoluteMaximumCapacity();
+
+ /**
+ * Get the currently utilized capacity of the queue
+ * relative to its parent queue.
+ * @return the currently utilized capacity of the queue
+ * relative to its parent queue
+ */
+ public float getUsedCapacity();
+
+ /**
+ * Get the currently utilized resources in the cluster
+ * by the queue and children (if any).
+ * @return used resources by the queue and its children
+ */
+ public Resource getUsedResources();
+
+ /**
+ * Get the current <em>utilization</em> of the queue
+ * and its children (if any).
+ * Utilization is defined as the ratio of
+ * <em>used-capacity over configured-capacity</em> of the queue.
+ * @return queue utilization
+ */
+ public float getUtilization();
+
+ /**
+ * Get the current run-state of the queue
+ * @return current run-state
+ */
+ public QueueState getState();
+
+ /**
+ * Get child queues
+ * @return child queues
+ */
+ public List<Queue> getChildQueues();
+
+ /**
+ * Check if the <code>user</code> has permission to perform the operation
+ * @param acl ACL
+ * @param user user
+ * @return <code>true</code> if the user has the permission,
+ * <code>false</code> otherwise
+ */
+ public boolean hasAccess(QueueACL acl, UserGroupInformation user);
+
+ /**
+ * Submit a new application to the queue.
+ * @param application application being submitted
+ * @param user user who submitted the application
+ * @param queue queue to which the application is submitted
+ */
+ public void submitApplication(SchedulerApp application, String user,
+ String queue)
+ throws AccessControlException;
+
+ /**
+ * An application submitted to this queue has finished.
+ * @param application completed application
+ * @param queue application queue
+ */
+ public void finishApplication(SchedulerApp application, String queue);
+
+ /**
+ * Assign containers to applications in the queue or its children (if any).
+ * @param clusterResource the resource of the cluster.
+ * @param node node on which resources are available
+ * @return resources assigned in this pass of the scheduler
+ */
+ public Resource assignContainers(Resource clusterResource, SchedulerNode node);
+
+ /**
+ * A container assigned to the queue has completed.
+ * @param clusterResource the resource of the cluster
+ * @param application application to which the container was assigned
+ * @param node node on which the container completed
+ * @param container completed container,
+ * <code>null</code> if it was just a reservation
+ * @param event event to be sent to the container
+ */
+ public void completedContainer(Resource clusterResource,
+ SchedulerApp application, SchedulerNode node,
+ RMContainer container, RMContainerEventType event);
+
+ /**
+ * Get the number of applications in the queue.
+ * @return number of applications
+ */
+ public int getNumApplications();
+
+
+ /**
+ * Reinitialize the queue.
+ * @param queue new queue to re-initialize from
+ * @param clusterResource resources in the cluster
+ */
+ public void reinitialize(Queue queue, Resource clusterResource)
+ throws IOException;
+
+ /**
+ * Update the cluster resource for queues as we add/remove nodes
+ * @param clusterResource the current cluster resource
+ */
+ public void updateResource(Resource clusterResource);
+
+ /**
+ * Recover the state of the queue
+ * @param clusterResource the resource of the cluster
+ * @param application the application for which the container was allocated
+ * @param container the container that was recovered.
+ */
+ public void recoverContainer(Resource clusterResource, SchedulerApp application,
+ Container container);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java
new file mode 100644
index 0000000..af7ac79
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java
@@ -0,0 +1,31 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+
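+/**
+ * A {@link SchedulerEvent} sent when a new application attempt is added,
+ * carrying the attempt id, the target queue and the submitting user.
+ */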
+public class AppAddedSchedulerEvent extends SchedulerEvent {
+
+ private final ApplicationAttemptId applicationAttemptId;
+ private final String queue;
+ private final String user;
+
+ public AppAddedSchedulerEvent(ApplicationAttemptId applicationAttemptId,
+ String queue, String user) {
+ super(SchedulerEventType.APP_ADDED);
+ this.applicationAttemptId = applicationAttemptId;
+ this.queue = queue;
+ this.user = user;
+ }
+
+ public ApplicationAttemptId getApplicationAttemptId() {
+ return applicationAttemptId;
+ }
+
+ public String getQueue() {
+ return queue;
+ }
+
+ public String getUser() {
+ return user;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppRemovedSchedulerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppRemovedSchedulerEvent.java
new file mode 100644
index 0000000..560f526
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppRemovedSchedulerEvent.java
@@ -0,0 +1,25 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+
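+/**
+ * A {@link SchedulerEvent} sent when an application attempt is removed,
+ * carrying the attempt id and the attempt's final state.
+ */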
+public class AppRemovedSchedulerEvent extends SchedulerEvent {
+
+ private final ApplicationAttemptId applicationAttemptId;
+ private final RMAppAttemptState finalAttemptState;
+
+ public AppRemovedSchedulerEvent(ApplicationAttemptId applicationAttemptId,
+ RMAppAttemptState finalAttemptState) {
+ super(SchedulerEventType.APP_REMOVED);
+ this.applicationAttemptId = applicationAttemptId;
+ this.finalAttemptState = finalAttemptState;
+ }
+
+ public ApplicationAttemptId getApplicationAttemptID() {
+ return this.applicationAttemptId;
+ }
+
+ public RMAppAttemptState getFinalAttemptState() {
+ return this.finalAttemptState;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/ContainerExpiredSchedulerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/ContainerExpiredSchedulerEvent.java
new file mode 100644
index 0000000..fd09d84
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/ContainerExpiredSchedulerEvent.java
@@ -0,0 +1,24 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
+
+/**
+ * The {@link SchedulerEvent} which notifies that a {@link ContainerId}
+ * has expired, sent by the {@link ContainerAllocationExpirer}.
+ */
+public class ContainerExpiredSchedulerEvent extends SchedulerEvent {
+
+ private final ContainerId containerId;
+
+ public ContainerExpiredSchedulerEvent(ContainerId containerId) {
+ super(SchedulerEventType.CONTAINER_EXPIRED);
+ this.containerId = containerId;
+ }
+
+ public ContainerId getContainerId() {
+ return containerId;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeAddedSchedulerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeAddedSchedulerEvent.java
new file mode 100644
index 0000000..2049fb3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeAddedSchedulerEvent.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
+
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
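+/**
+ * A {@link SchedulerEvent} sent when an {@link RMNode} is added to the cluster.
+ */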
+public class NodeAddedSchedulerEvent extends SchedulerEvent {
+
+ private final RMNode rmNode;
+
+ public NodeAddedSchedulerEvent(RMNode rmNode) {
+ super(SchedulerEventType.NODE_ADDED);
+ this.rmNode = rmNode;
+ }
+
+ public RMNode getAddedRMNode() {
+ return rmNode;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeRemovedSchedulerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeRemovedSchedulerEvent.java
new file mode 100644
index 0000000..2f6fecf
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeRemovedSchedulerEvent.java
@@ -0,0 +1,18 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
+
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
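+/**
+ * A {@link SchedulerEvent} sent when an {@link RMNode} is removed from the cluster.
+ */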
+public class NodeRemovedSchedulerEvent extends SchedulerEvent {
+
+ private final RMNode rmNode;
+
+ public NodeRemovedSchedulerEvent(RMNode rmNode) {
+ super(SchedulerEventType.NODE_REMOVED);
+ this.rmNode = rmNode;
+ }
+
+ public RMNode getRemovedRMNode() {
+ return rmNode;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java
new file mode 100644
index 0000000..9fd72d2
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java
@@ -0,0 +1,30 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+
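+/**
+ * A {@link SchedulerEvent} delivering a node heartbeat to the scheduler,
+ * with the containers reported by the node grouped by application.
+ */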
+public class NodeUpdateSchedulerEvent extends SchedulerEvent {
+
+ private final RMNode rmNode;
+ private final Map<ApplicationId, List<Container>> containers;
+
+ public NodeUpdateSchedulerEvent(RMNode rmNode,
+ Map<ApplicationId, List<Container>> containers) {
+ super(SchedulerEventType.NODE_UPDATE);
+ this.rmNode = rmNode;
+ this.containers = containers;
+ }
+
+ public RMNode getRMNode() {
+ return rmNode;
+ }
+
+ public Map<ApplicationId, List<Container>> getContainers() {
+ return containers;
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEvent.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEvent.java
new file mode 100644
index 0000000..6ce7421
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEvent.java
@@ -0,0 +1,9 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
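+/**
+ * Base class for all scheduler events; the concrete kind of event is
+ * identified by its {@link SchedulerEventType}.
+ */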
+public class SchedulerEvent extends AbstractEvent<SchedulerEventType> {
+ public SchedulerEvent(SchedulerEventType type) {
+ super(type);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java
new file mode 100644
index 0000000..f75f2d1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java
@@ -0,0 +1,16 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
+
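+/**
+ * Types of events handled by the scheduler, grouped by their source.
+ */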
+public enum SchedulerEventType {
+
+ // Source: Node
+ NODE_ADDED,
+ NODE_REMOVED,
+ NODE_UPDATE,
+
+ // Source: App
+ APP_ADDED,
+ APP_REMOVED,
+
+ // Source: ContainerAllocationExpirer
+ CONTAINER_EXPIRED
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
new file mode 100644
index 0000000..be6c904
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -0,0 +1,731 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.Lock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+
+@LimitedPrivate("yarn")
+@Evolving
+public class FifoScheduler implements ResourceScheduler {
+
+ private static final Log LOG = LogFactory.getLog(FifoScheduler.class);
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ Configuration conf;
+ private ContainerTokenSecretManager containerTokenSecretManager;
+
+ private final static Container[] EMPTY_CONTAINER_ARRAY = new Container[] {};
+ private final static List<Container> EMPTY_CONTAINER_LIST = Arrays.asList(EMPTY_CONTAINER_ARRAY);
+ private RMContext rmContext;
+
+ private Map<NodeId, SchedulerNode> nodes = new ConcurrentHashMap<NodeId, SchedulerNode>();
+
+ private static final int MINIMUM_MEMORY = 1024;
+
+ private static final String FIFO_PREFIX =
+ YarnConfiguration.RM_PREFIX + "fifo.";
+ @Private
+ public static final String MINIMUM_ALLOCATION =
+ FIFO_PREFIX + "minimum-allocation-mb";
+
+ private static final int MAXIMUM_MEMORY = 10240;
+
+ @Private
+ public static final String MAXIMUM_ALLOCATION =
+ FIFO_PREFIX + "maximum-allocation-mb";
+
+ private boolean initialized;
+ private Resource minimumAllocation;
+ private Resource maximumAllocation;
+
+ private Map<ApplicationAttemptId, SchedulerApp> applications
+ = new TreeMap<ApplicationAttemptId, SchedulerApp>();
+
+ private static final String DEFAULT_QUEUE_NAME = "default";
+ private final QueueMetrics metrics =
+ QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false);
+
+ private final Queue DEFAULT_QUEUE = new Queue() {
+ @Override
+ public String getQueueName() {
+ return DEFAULT_QUEUE_NAME;
+ }
+
+ @Override
+ public QueueMetrics getMetrics() {
+ return metrics;
+ }
+
+ @Override
+ public QueueInfo getQueueInfo(
+ boolean includeChildQueues, boolean recursive) {
+ QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
+ queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
+ queueInfo.setCapacity(100.0f);
+ queueInfo.setMaximumCapacity(100.0f);
+ queueInfo.setChildQueues(new ArrayList<QueueInfo>());
+ return queueInfo;
+ }
+
+ @Override
+ public Map<QueueACL, AccessControlList> getQueueAcls() {
+ Map<QueueACL, AccessControlList> acls =
+ new HashMap<QueueACL, AccessControlList>();
+ for (QueueACL acl : QueueACL.values()) {
+ acls.put(acl, new AccessControlList("*"));
+ }
+ return acls;
+ }
+
+ @Override
+ public List<QueueUserACLInfo> getQueueUserAclInfo(
+ UserGroupInformation unused) {
+ QueueUserACLInfo queueUserAclInfo =
+ recordFactory.newRecordInstance(QueueUserACLInfo.class);
+ queueUserAclInfo.setQueueName(DEFAULT_QUEUE_NAME);
+ queueUserAclInfo.setUserAcls(Arrays.asList(QueueACL.values()));
+ return Collections.singletonList(queueUserAclInfo);
+ }
+ };
+
+ public synchronized Resource getUsedResource(NodeId nodeId) {
+ return getNode(nodeId).getUsedResource();
+ }
+
+ public synchronized Resource getAvailableResource(NodeId nodeId) {
+ return getNode(nodeId).getAvailableResource();
+ }
+
+ @Override
+ public Resource getMinimumResourceCapability() {
+ return minimumAllocation;
+ }
+
+ @Override
+ public Resource getMaximumResourceCapability() {
+ return maximumAllocation;
+ }
+
+ @Override
+ public synchronized void reinitialize(Configuration conf,
+ ContainerTokenSecretManager containerTokenSecretManager,
+ RMContext rmContext)
+ throws IOException
+ {
+ if (!this.initialized) {
+ this.conf = conf;
+ this.containerTokenSecretManager = containerTokenSecretManager;
+ this.rmContext = rmContext;
+ this.minimumAllocation =
+ Resources.createResource(conf.getInt(MINIMUM_ALLOCATION, MINIMUM_MEMORY));
+ this.maximumAllocation =
+ Resources.createResource(conf.getInt(MAXIMUM_ALLOCATION, MAXIMUM_MEMORY));
+ this.initialized = true;
+ } else {
+ this.conf = conf;
+ }
+ }
+
+ private static final Allocation EMPTY_ALLOCATION =
+ new Allocation(EMPTY_CONTAINER_LIST, Resources.createResource(0));
+ @Override
+ public Allocation allocate(
+ ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
+ List<ContainerId> release) {
+ SchedulerApp application = getApplication(applicationAttemptId);
+ if (application == null) {
+ LOG.error("Calling allocate on removed " +
+ "or non existant application " + applicationAttemptId);
+ return EMPTY_ALLOCATION;
+ }
+
+ // Sanity check
+ normalizeRequests(ask);
+
+ // Release containers
+ for (ContainerId releasedContainer : release) {
+ containerCompleted(getRMContainer(releasedContainer),
+ RMContainerEventType.RELEASED);
+ }
+
+ if (!ask.isEmpty()) {
+ LOG.debug("allocate: pre-update" +
+ " applicationId=" + applicationAttemptId +
+ " application=" + application);
+ application.showRequests();
+
+ // Update application requests
+ application.updateResourceRequests(ask);
+
+ LOG.debug("allocate: post-update" +
+ " applicationId=" + applicationAttemptId +
+ " application=" + application);
+ application.showRequests();
+
+ LOG.debug("allocate:" +
+ " applicationId=" + applicationAttemptId +
+ " #ask=" + ask.size());
+ }
+
+ return new Allocation(
+ application.pullNewlyAllocatedContainers(),
+ application.getHeadroom());
+ }
+
+ private void normalizeRequests(List<ResourceRequest> asks) {
+ for (ResourceRequest ask : asks) {
+ normalizeRequest(ask);
+ }
+ }
+
+ private void normalizeRequest(ResourceRequest ask) {
+ int memory = ask.getCapability().getMemory();
+ // FIXME: TestApplicationCleanup is relying on unnormalized behavior.
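+ // Round the ask up to the next multiple of the minimum allocation,
+ // e.g. with a 1024MB minimum a 1500MB ask becomes 2048MB.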
+ memory =
+ MINIMUM_MEMORY *
+ ((memory/MINIMUM_MEMORY) + (memory%MINIMUM_MEMORY > 0 ? 1 : 0));
+ ask.setCapability(Resources.createResource(memory));
+ }
+
+ private SchedulerApp getApplication(
+ ApplicationAttemptId applicationAttemptId) {
+ return applications.get(applicationAttemptId);
+ }
+
+ private SchedulerNode getNode(NodeId nodeId) {
+ return nodes.get(nodeId);
+ }
+
+ private synchronized void addApplication(ApplicationAttemptId appAttemptId,
+ String queueName, String user) {
+ // TODO: Fix store
+ SchedulerApp schedulerApp =
+ new SchedulerApp(appAttemptId, user, DEFAULT_QUEUE,
+ this.rmContext, null);
+ applications.put(appAttemptId, schedulerApp);
+ metrics.submitApp(user);
+ LOG.info("Application Submission: " + appAttemptId.getApplicationId() +
+ " from " + user + ", currently active: " + applications.size());
+ rmContext.getDispatcher().getEventHandler().handle(
+ new RMAppAttemptEvent(appAttemptId,
+ RMAppAttemptEventType.APP_ACCEPTED));
+ }
+
+ private synchronized void doneApplication(
+ ApplicationAttemptId applicationAttemptId,
+ RMAppAttemptState rmAppAttemptFinalState)
+ throws IOException {
+ SchedulerApp application = getApplication(applicationAttemptId);
+ if (application == null) {
+ throw new IOException("Unknown application " + applicationAttemptId +
+ " has completed!");
+ }
+
+ // Kill all 'live' containers
+ for (RMContainer container : application.getLiveContainers()) {
+ containerCompleted(container, RMContainerEventType.KILL);
+ }
+
+ // Clean up pending requests, metrics etc.
+ application.stop(rmAppAttemptFinalState);
+
+ // Remove the application
+ applications.remove(applicationAttemptId);
+ }
+
+ /**
+ * Heart of the scheduler...
+ *
+ * @param node node on which resources are available to be allocated
+ */
+ private void assignContainers(SchedulerNode node) {
+ LOG.debug("assignContainers:" +
+ " node=" + node.getRMNode().getNodeAddress() +
+ " #applications=" + applications.size());
+
+ // Try to assign containers to applications in fifo order
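+ // (the applications TreeMap is sorted by attempt id, giving FIFO order)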
+ for (Map.Entry<ApplicationAttemptId, SchedulerApp> e : applications
+ .entrySet()) {
+ SchedulerApp application = e.getValue();
+ LOG.debug("pre-assignContainers");
+ application.showRequests();
+ synchronized (application) {
+ for (Priority priority : application.getPriorities()) {
+ int maxContainers =
+ getMaxAllocatableContainers(application, priority, node,
+ NodeType.OFF_SWITCH);
+ // Ensure the application needs containers of this priority
+ if (maxContainers > 0) {
+ int assignedContainers =
+ assignContainersOnNode(node, application, priority);
+ // Do not assign out of order w.r.t priorities
+ if (assignedContainers == 0) {
+ break;
+ }
+ }
+ }
+ }
+
+ application.setAvailableResourceLimit(clusterResource);
+
+ LOG.debug("post-assignContainers");
+ application.showRequests();
+
+ // Done
+ if (Resources.lessThan(node.getAvailableResource(), minimumAllocation)) {
+ return;
+ }
+ }
+ }
+
+ private int getMaxAllocatableContainers(SchedulerApp application,
+ Priority priority, SchedulerNode node, NodeType type) {
+ ResourceRequest offSwitchRequest =
+ application.getResourceRequest(priority, SchedulerNode.ANY);
+ int maxContainers = offSwitchRequest.getNumContainers();
+
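+ // Narrow the ANY (off-switch) ask down by the more specific rack-local
+ // and node-local asks, when a finer locality is being considered.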
+ if (type == NodeType.OFF_SWITCH) {
+ return maxContainers;
+ }
+
+ if (type == NodeType.RACK_LOCAL) {
+ ResourceRequest rackLocalRequest =
+ application.getResourceRequest(priority, node.getRMNode().getRackName());
+ if (rackLocalRequest == null) {
+ return maxContainers;
+ }
+
+ maxContainers = Math.min(maxContainers, rackLocalRequest.getNumContainers());
+ }
+
+ if (type == NodeType.NODE_LOCAL) {
+ ResourceRequest nodeLocalRequest =
+ application.getResourceRequest(priority, node.getRMNode().getNodeAddress());
+ if (nodeLocalRequest != null) {
+ maxContainers = Math.min(maxContainers, nodeLocalRequest.getNumContainers());
+ }
+ }
+
+ return maxContainers;
+ }
+
+
+ private int assignContainersOnNode(SchedulerNode node,
+ SchedulerApp application, Priority priority
+ ) {
+ // Data-local
+ int nodeLocalContainers =
+ assignNodeLocalContainers(node, application, priority);
+
+ // Rack-local
+ int rackLocalContainers =
+ assignRackLocalContainers(node, application, priority);
+
+ // Off-switch
+ int offSwitchContainers =
+ assignOffSwitchContainers(node, application, priority);
+
+
+ LOG.debug("assignContainersOnNode:" +
+ " node=" + node.getRMNode().getNodeAddress() +
+ " application=" + application.getApplicationId().getId() +
+ " priority=" + priority.getPriority() +
+ " #assigned=" +
+ (nodeLocalContainers + rackLocalContainers + offSwitchContainers));
+
+
+ return (nodeLocalContainers + rackLocalContainers + offSwitchContainers);
+ }
+
+ private int assignNodeLocalContainers(SchedulerNode node,
+ SchedulerApp application, Priority priority) {
+ int assignedContainers = 0;
+ ResourceRequest request =
+ application.getResourceRequest(priority, node.getRMNode().getNodeAddress());
+ if (request != null) {
+ int assignableContainers =
+ Math.min(
+ getMaxAllocatableContainers(application, priority, node,
+ NodeType.NODE_LOCAL),
+ request.getNumContainers());
+ assignedContainers =
+ assignContainer(node, application, priority,
+ assignableContainers, request, NodeType.NODE_LOCAL);
+ }
+ return assignedContainers;
+ }
+
+ private int assignRackLocalContainers(SchedulerNode node,
+ SchedulerApp application, Priority priority) {
+ int assignedContainers = 0;
+ ResourceRequest request =
+ application.getResourceRequest(priority, node.getRMNode().getRackName());
+ if (request != null) {
+ int assignableContainers =
+ Math.min(
+ getMaxAllocatableContainers(application, priority, node,
+ NodeType.RACK_LOCAL),
+ request.getNumContainers());
+ assignedContainers =
+ assignContainer(node, application, priority,
+ assignableContainers, request, NodeType.RACK_LOCAL);
+ }
+ return assignedContainers;
+ }
+
+ private int assignOffSwitchContainers(SchedulerNode node,
+ SchedulerApp application, Priority priority) {
+ int assignedContainers = 0;
+ ResourceRequest request =
+ application.getResourceRequest(priority, SchedulerNode.ANY);
+ if (request != null) {
+ assignedContainers =
+ assignContainer(node, application, priority,
+ request.getNumContainers(), request, NodeType.OFF_SWITCH);
+ }
+ return assignedContainers;
+ }
+
+ private int assignContainer(SchedulerNode node, SchedulerApp application,
+ Priority priority, int assignableContainers,
+ ResourceRequest request, NodeType type) {
+ LOG.debug("assignContainers:" +
+ " node=" + node.getRMNode().getNodeAddress() +
+ " application=" + application.getApplicationId().getId() +
+ " priority=" + priority.getPriority() +
+ " assignableContainers=" + assignableContainers +
+ " request=" + request + " type=" + type);
+ Resource capability = request.getCapability();
+
+ int availableContainers =
+ node.getAvailableResource().getMemory() / capability.getMemory();
+ // TODO: A buggy application asking for zero memory would crash the
+ // scheduler with a divide-by-zero here.
+ int assignedContainers =
+ Math.min(assignableContainers, availableContainers);
+
+ if (assignedContainers > 0) {
+ for (int i=0; i < assignedContainers; ++i) {
+ // Create the container
+ Container container =
+ BuilderUtils.newContainer(recordFactory,
+ application.getApplicationAttemptId(),
+ application.getNewContainerId(),
+ node.getRMNode().getNodeID(),
+ node.getRMNode().getHttpAddress(), capability);
+
+ // If security is enabled, send the container-tokens too.
+ if (UserGroupInformation.isSecurityEnabled()) {
+ ContainerToken containerToken =
+ recordFactory.newRecordInstance(ContainerToken.class);
+ ContainerTokenIdentifier tokenidentifier =
+ new ContainerTokenIdentifier(container.getId(),
+ container.getNodeId().toString(), container.getResource());
+ containerToken.setIdentifier(
+ ByteBuffer.wrap(tokenidentifier.getBytes()));
+ containerToken.setKind(ContainerTokenIdentifier.KIND.toString());
+ containerToken.setPassword(
+ ByteBuffer.wrap(containerTokenSecretManager
+ .createPassword(tokenidentifier)));
+ containerToken.setService(container.getNodeId().toString());
+ container.setContainerToken(containerToken);
+ }
+
+ // Allocate!
+
+ // Inform the application
+ RMContainer rmContainer =
+ application.allocate(type, node, priority, request, container);
+
+ // Inform the node
+ node.allocateContainer(application.getApplicationId(),
+ rmContainer);
+ }
+
+ // Update total usage
+ Resources.addTo(usedResource,
+ Resources.multiply(capability, assignedContainers));
+ }
+
+ return assignedContainers;
+ }
+
+ private synchronized void nodeUpdate(RMNode rmNode,
+ Map<ApplicationId, List<Container>> remoteContainers) {
+ SchedulerNode node = getNode(rmNode.getNodeID());
+
+ for (List<Container> appContainers : remoteContainers.values()) {
+ for (Container container : appContainers) {
+ /* make sure the scheduler hasn't already removed the application */
+ if (getApplication(container.getId().getAppAttemptId()) != null) {
+ if (container.getState() == ContainerState.RUNNING) {
+ containerLaunchedOnNode(container, node);
+ } else { // has to be COMPLETE
+ containerCompleted(getRMContainer(container.getId()),
+ RMContainerEventType.FINISHED);
+ }
+ }
+ else {
+ LOG.warn("Scheduler not tracking application " + container.getId().getAppAttemptId());
+ }
+ }
+ }
+
+ if (Resources.greaterThanOrEqual(node.getAvailableResource(),
+ minimumAllocation)) {
+ LOG.info("Node heartbeat " + rmNode.getNodeID() +
+ " available resource = " + node.getAvailableResource());
+
+ assignContainers(node);
+
+ LOG.info("Node after allocation " + rmNode.getNodeID() + " resource = "
+ + node.getAvailableResource());
+ }
+
+ metrics.setAvailableResourcesToQueue(
+ Resources.subtract(clusterResource, usedResource));
+ }
+
+ @Override
+ public void handle(SchedulerEvent event) {
+ switch(event.getType()) {
+ case NODE_ADDED:
+ {
+ NodeAddedSchedulerEvent nodeAddedEvent = (NodeAddedSchedulerEvent)event;
+ addNode(nodeAddedEvent.getAddedRMNode());
+ }
+ break;
+ case NODE_REMOVED:
+ {
+ NodeRemovedSchedulerEvent nodeRemovedEvent = (NodeRemovedSchedulerEvent)event;
+ removeNode(nodeRemovedEvent.getRemovedRMNode());
+ }
+ break;
+ case NODE_UPDATE:
+ {
+ NodeUpdateSchedulerEvent nodeUpdatedEvent =
+ (NodeUpdateSchedulerEvent)event;
+ nodeUpdate(nodeUpdatedEvent.getRMNode(),
+ nodeUpdatedEvent.getContainers());
+ }
+ break;
+ case APP_ADDED:
+ {
+ AppAddedSchedulerEvent appAddedEvent = (AppAddedSchedulerEvent) event;
+ addApplication(appAddedEvent.getApplicationAttemptId(), appAddedEvent
+ .getQueue(), appAddedEvent.getUser());
+ }
+ break;
+ case APP_REMOVED:
+ {
+ AppRemovedSchedulerEvent appRemovedEvent = (AppRemovedSchedulerEvent)event;
+ try {
+ doneApplication(appRemovedEvent.getApplicationAttemptID(),
+ appRemovedEvent.getFinalAttemptState());
+ } catch(IOException ie) {
+ LOG.error("Unable to remove application "
+ + appRemovedEvent.getApplicationAttemptID(), ie);
+ }
+ }
+ break;
+ case CONTAINER_EXPIRED:
+ {
+ ContainerExpiredSchedulerEvent containerExpiredEvent =
+ (ContainerExpiredSchedulerEvent) event;
+ containerCompleted(getRMContainer(containerExpiredEvent.getContainerId()),
+ RMContainerEventType.EXPIRE);
+ }
+ break;
+ default:
+ LOG.error("Invalid eventtype " + event.getType() + ". Ignoring!");
+ }
+ }
+
+ private void containerLaunchedOnNode(Container container, SchedulerNode node) {
+ // Get the application for the launched container
+ ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId();
+ SchedulerApp application = getApplication(applicationAttemptId);
+ if (application == null) {
+ LOG.info("Unknown application: " + applicationAttemptId +
+ " launched container " + container.getId() +
+ " on node: " + node);
+ return;
+ }
+
+ application.containerLaunchedOnNode(container.getId());
+ }
+
+ @Lock(FifoScheduler.class)
+ private synchronized void containerCompleted(RMContainer rmContainer,
+ RMContainerEventType event) {
+ // Get the application for the finished container
+ Container container = rmContainer.getContainer();
+ ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId();
+ SchedulerApp application = getApplication(applicationAttemptId);
+
+ // Get the node on which the container was allocated
+ SchedulerNode node = getNode(container.getNodeId());
+
+ if (application == null) {
+ LOG.info("Unknown application: " + applicationAttemptId +
+ " released container " + container.getId() +
+ " on node: " + node +
+ " with event: " + event);
+ return;
+ }
+
+ // Inform the application
+ application.containerCompleted(rmContainer, event);
+
+ // Inform the node
+ node.releaseContainer(container);
+
+ LOG.info("Application " + applicationAttemptId +
+ " released container " + container.getId() +
+ " on node: " + node +
+ " with event: " + event);
+
+ }
+
+ private Resource clusterResource = recordFactory.newRecordInstance(Resource.class);
+ private Resource usedResource = recordFactory.newRecordInstance(Resource.class);
+
+ private synchronized void removeNode(RMNode nodeInfo) {
+ SchedulerNode node = getNode(nodeInfo.getNodeID());
+ // Kill running containers
+ for(RMContainer container : node.getRunningContainers()) {
+ containerCompleted(container, RMContainerEventType.KILL);
+ }
+
+ //Remove the node
+ this.nodes.remove(nodeInfo.getNodeID());
+
+ // Update cluster metrics
+ Resources.subtractFrom(clusterResource, nodeInfo.getTotalCapability());
+ }
+
+ @Override
+ public QueueInfo getQueueInfo(String queueName,
+ boolean includeChildQueues, boolean recursive) {
+ return DEFAULT_QUEUE.getQueueInfo(false, false);
+ }
+
+ @Override
+ public List<QueueUserACLInfo> getQueueUserAclInfo() {
+ return DEFAULT_QUEUE.getQueueUserAclInfo(null);
+ }
+
+ private synchronized void addNode(RMNode nodeManager) {
+ this.nodes.put(nodeManager.getNodeID(), new SchedulerNode(nodeManager));
+ Resources.addTo(clusterResource, nodeManager.getTotalCapability());
+ }
+
+ @Override
+ public void recover(RMState state) {
+ // TODO fix recovery
+// for (Map.Entry<ApplicationId, ApplicationInfo> entry: state.getStoredApplications().entrySet()) {
+// ApplicationId appId = entry.getKey();
+// ApplicationInfo appInfo = entry.getValue();
+// SchedulerApp app = applications.get(appId);
+// app.allocate(appInfo.getContainers());
+// }
+ }
+
+ @Override
+ public synchronized SchedulerNodeReport getNodeReport(NodeId nodeId) {
+ SchedulerNode node = getNode(nodeId);
+ return new SchedulerNodeReport(
+ node.getUsedResource(), node.getNumContainers());
+ }
+
+ private RMContainer getRMContainer(ContainerId containerId) {
+ SchedulerApp application =
+ getApplication(containerId.getAppAttemptId());
+ return application.getRMContainer(containerId);
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java
new file mode 100644
index 0000000..e4bbfa5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java
@@ -0,0 +1,275 @@
+package org.apache.hadoop.yarn.server.resourcemanager.tools;
+
+import java.io.IOException;
+import java.security.PrivilegedAction;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.admin.AdminSecurityInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
+
+public class RMAdmin extends Configured implements Tool {
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ public RMAdmin() {
+ super();
+ }
+
+ public RMAdmin(Configuration conf) {
+ super(conf);
+ }
+
+ private static void printHelp(String cmd) {
+ String summary = "rmadmin is the command to execute Map-Reduce administrative commands.\n" +
+ "The full syntax is: \n\n" +
+ "hadoop rmadmin" +
+ " [-refreshQueues]" +
+ " [-refreshNodes]" +
+ " [-refreshSuperUserGroupsConfiguration]" +
+ " [-refreshUserToGroupsMappings]" +
+ " [-refreshAdminAcls]" +
+ " [-help [cmd]]\n";
+
+ String refreshQueues =
+ "-refreshQueues: Reload the queues' acls, states and "
+ + "scheduler specific properties.\n"
+ + "\t\tResourceManager will reload the mapred-queues configuration file.\n";
+
+ String refreshNodes =
+ "-refreshNodes: Refresh the hosts information at the ResourceManager.\n";
+
+ String refreshUserToGroupsMappings =
+ "-refreshUserToGroupsMappings: Refresh user-to-groups mappings\n";
+
+ String refreshSuperUserGroupsConfiguration =
+ "-refreshSuperUserGroupsConfiguration: Refresh superuser proxy groups mappings\n";
+
+ String refreshAdminAcls =
+ "-refreshAdminAcls: Refresh acls for administration of ResourceManager\n";
+ String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
+ "\t\tis specified.\n";
+
+ if ("refreshQueues".equals(cmd)) {
+ System.out.println(refreshQueues);
+ } else if ("refreshNodes".equals(cmd)) {
+ System.out.println(refreshNodes);
+ } else if ("refreshUserToGroupsMappings".equals(cmd)) {
+ System.out.println(refreshUserToGroupsMappings);
+ } else if ("refreshSuperUserGroupsConfiguration".equals(cmd)) {
+ System.out.println(refreshSuperUserGroupsConfiguration);
+ } else if ("refreshAdminAcls".equals(cmd)) {
+ System.out.println(refreshAdminAcls);
+ } else if ("help".equals(cmd)) {
+ System.out.println(help);
+ } else {
+ System.out.println(summary);
+ System.out.println(refreshQueues);
+ System.out.println(help);
+ System.out.println();
+ ToolRunner.printGenericCommandUsage(System.out);
+ }
+ }
+
+ /**
+ * Displays format of commands.
+ * @param cmd The command that is being executed.
+ */
+ private static void printUsage(String cmd) {
+ if ("-refreshQueues".equals(cmd)) {
+ System.err.println("Usage: java RMAdmin" + " [-refreshQueues]");
+ } else if ("-refreshNodes".equals(cmd)){
+ System.err.println("Usage: java RMAdmin" + " [-refreshNodes]");
+ } else if ("-refreshUserToGroupsMappings".equals(cmd)){
+ System.err.println("Usage: java RMAdmin" + " [-refreshUserToGroupsMappings]");
+ } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)){
+ System.err.println("Usage: java RMAdmin" + " [-refreshSuperUserGroupsConfiguration]");
+ } else if ("-refreshAdminAcls".equals(cmd)){
+ System.err.println("Usage: java RMAdmin" + " [-refreshAdminAcls]");
+ } else {
+ System.err.println("Usage: java RMAdmin");
+ System.err.println(" [-refreshQueues]");
+ System.err.println(" [-refreshNodes]");
+ System.err.println(" [-refreshUserToGroupsMappings]");
+ System.err.println(" [-refreshSuperUserGroupsConfiguration]");
+ System.err.println(" [-refreshAdminAcls]");
+ System.err.println(" [-help [cmd]]");
+ System.err.println();
+ ToolRunner.printGenericCommandUsage(System.err);
+ }
+ }
+
+ private static UserGroupInformation getUGI(Configuration conf
+ ) throws IOException {
+ return UserGroupInformation.getCurrentUser();
+ }
+
+ private RMAdminProtocol createAdminProtocol() throws IOException {
+ // Get the current configuration
+ final YarnConfiguration conf = new YarnConfiguration(getConf());
+
+ // Create the client
+ final String adminAddress =
+ conf.get(RMConfig.ADMIN_ADDRESS,
+ RMConfig.DEFAULT_ADMIN_BIND_ADDRESS);
+ final YarnRPC rpc = YarnRPC.create(conf);
+
+ if (UserGroupInformation.isSecurityEnabled()) {
+ conf.setClass(YarnConfiguration.YARN_SECURITY_INFO,
+ AdminSecurityInfo.class, SecurityInfo.class);
+ }
+
+ RMAdminProtocol adminProtocol =
+ getUGI(conf).doAs(new PrivilegedAction<RMAdminProtocol>() {
+ @Override
+ public RMAdminProtocol run() {
+ return (RMAdminProtocol) rpc.getProxy(RMAdminProtocol.class,
+ NetUtils.createSocketAddr(adminAddress), conf);
+ }
+ });
+
+ return adminProtocol;
+ }
+
+ private int refreshQueues() throws IOException {
+ // Refresh the queue properties
+ RMAdminProtocol adminProtocol = createAdminProtocol();
+ RefreshQueuesRequest request =
+ recordFactory.newRecordInstance(RefreshQueuesRequest.class);
+ adminProtocol.refreshQueues(request);
+ return 0;
+ }
+
+ private int refreshNodes() throws IOException {
+ // Refresh the nodes
+ RMAdminProtocol adminProtocol = createAdminProtocol();
+ RefreshNodesRequest request =
+ recordFactory.newRecordInstance(RefreshNodesRequest.class);
+ adminProtocol.refreshNodes(request);
+ return 0;
+ }
+
+ private int refreshUserToGroupsMappings() throws IOException {
+ // Refresh the user-to-groups mappings
+ RMAdminProtocol adminProtocol = createAdminProtocol();
+ RefreshUserToGroupsMappingsRequest request =
+ recordFactory.newRecordInstance(RefreshUserToGroupsMappingsRequest.class);
+ adminProtocol.refreshUserToGroupsMappings(request);
+ return 0;
+ }
+
+ private int refreshSuperUserGroupsConfiguration() throws IOException {
+ // Refresh the super-user groups
+ RMAdminProtocol adminProtocol = createAdminProtocol();
+ RefreshSuperUserGroupsConfigurationRequest request =
+ recordFactory.newRecordInstance(RefreshSuperUserGroupsConfigurationRequest.class);
+ adminProtocol.refreshSuperUserGroupsConfiguration(request);
+ return 0;
+ }
+
+ private int refreshAdminAcls() throws IOException {
+ // Refresh the admin acls
+ RMAdminProtocol adminProtocol = createAdminProtocol();
+ RefreshAdminAclsRequest request =
+ recordFactory.newRecordInstance(RefreshAdminAclsRequest.class);
+ adminProtocol.refreshAdminAcls(request);
+ return 0;
+ }
+
+ @Override
+ public int run(String[] args) throws Exception {
+ if (args.length < 1) {
+ printUsage("");
+ return -1;
+ }
+
+ int exitCode = -1;
+ int i = 0;
+ String cmd = args[i++];
+ //
+ // verify that we have enough command line parameters
+ //
+ if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) ||
+ "-refreshNodes".equals(cmd) ||
+ "-refreshUserToGroupsMappings".equals(cmd) ||
+ "-refreshSuperUserGroupsConfiguration".equals(cmd)) {
+ if (args.length != 1) {
+ printUsage(cmd);
+ return exitCode;
+ }
+ }
+
+ exitCode = 0;
+ try {
+ if ("-refreshQueues".equals(cmd)) {
+ exitCode = refreshQueues();
+ } else if ("-refreshNodes".equals(cmd)) {
+ exitCode = refreshNodes();
+ } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
+ exitCode = refreshUserToGroupsMappings();
+ } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
+ exitCode = refreshSuperUserGroupsConfiguration();
+ } else if ("-refreshAdminAcls".equals(cmd)) {
+ exitCode = refreshAdminAcls();
+ } else if ("-help".equals(cmd)) {
+ if (i < args.length) {
+ printUsage(args[i]);
+ } else {
+ printHelp("");
+ }
+ } else {
+ exitCode = -1;
+ System.err.println(cmd.substring(1) + ": Unknown command");
+ printUsage("");
+ printUsage("");
+ }
+
+ } catch (IllegalArgumentException arge) {
+ exitCode = -1;
+ System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
+ printUsage(cmd);
+ } catch (RemoteException e) {
+ //
+ // This is an error returned by the Hadoop server. Print
+ // out the first line of the error message, ignore the stack trace.
+ exitCode = -1;
+ try {
+ String[] content;
+ content = e.getLocalizedMessage().split("\n");
+ System.err.println(cmd.substring(1) + ": "
+ + content[0]);
+ } catch (Exception ex) {
+ System.err.println(cmd.substring(1) + ": "
+ + ex.getLocalizedMessage());
+ }
+ } catch (Exception e) {
+ exitCode = -1;
+ System.err.println(cmd.substring(1) + ": "
+ + e.getLocalizedMessage());
+ }
+ return exitCode;
+ }
+
+ public static void main(String[] args) throws Exception {
+ int result = ToolRunner.run(new RMAdmin(), args);
+ System.exit(result);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
new file mode 100644
index 0000000..cf4d94b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
@@ -0,0 +1,93 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
+
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.util.Apps;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.webapp.view.JQueryUI.Render;
+
+import com.google.inject.Inject;
+
+class AppsBlock extends HtmlBlock {
+ final AppsList list;
+
+ @Inject AppsBlock(AppsList list, ViewContext ctx) {
+ super(ctx);
+ this.list = list;
+ }
+
+ @Override public void render(Block html) {
+ TBODY<TABLE<Hamlet>> tbody = html.
+ table("#apps").
+ thead().
+ tr().
+ th(".id", "ID").
+ th(".user", "User").
+ th(".name", "Name").
+ th(".queue", "Queue").
+ th(".state", "State").
+ th(".progress", "Progress").
+ th(".ui", "Tracking UI").
+ th(".note", "Note")._()._().
+ tbody();
+ int i = 0;
+ for (RMApp app : list.apps.values()) {
+ String appId = Apps.toString(app.getApplicationId());
+ String trackingUrl = app.getTrackingUrl();
+ String ui = trackingUrl == null || trackingUrl.isEmpty() ? "UNASSIGNED" :
+ (app.getFinishTime() == 0 ? "ApplicationMaster" : "JobHistory");
+ String percent = String.format("%.1f", app.getProgress() * 100);
+ tbody.
+ tr().
+ td().
+ br().$title(String.valueOf(app.getApplicationId().getId()))._(). // for sorting
+ a(url("app", appId), appId)._().
+ td(app.getUser().toString()).
+ td(app.getName().toString()).
+ td(app.getQueue().toString()).
+ td(app.getState().toString()).
+ td().
+ br().$title(percent)._(). // for sorting
+ div(_PROGRESSBAR).
+ $title(join(percent, '%')). // tooltip
+ div(_PROGRESSBAR_VALUE).
+ $style(join("width:", percent, '%'))._()._()._().
+ td().
+ a(trackingUrl == null ? "#" : join("http://", trackingUrl), ui)._().
+ td(app.getDiagnostics().toString())._();
+ if (list.rendering != Render.HTML && ++i >= 20) break;
+ }
+ tbody._()._();
+
+ if (list.rendering == Render.JS_ARRAY) {
+ echo("<script type='text/javascript'>\n",
+ "var appsData=");
+ list.toDataTableArrays(writer());
+ echo("\n</script>\n");
+ }
+ }
+}
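The progress cell above hides the raw percentage in an element's title attribute so the title-numeric sorter configured in RmView can order rows numerically while the visible cell stays a styled bar. A small sketch of the three strings involved (0.437f stands in for a hypothetical app.getProgress() value):

    public class ProgressCellDemo {
      public static void main(String[] args) {
        float progress = 0.437f; // hypothetical app.getProgress() value
        String percent = String.format("%.1f", progress * 100);
        System.out.println("sort key (title attr): " + percent); // 43.7
        System.out.println("tooltip: " + percent + "%");         // 43.7%
        System.out.println("bar style: width:" + percent + "%"); // width:43.7%
      }
    }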
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java
new file mode 100644
index 0000000..a0dfa78
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java
@@ -0,0 +1,90 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.commons.lang.StringEscapeUtils.escapeHtml;
+import static org.apache.commons.lang.StringEscapeUtils.escapeJavaScript;
+import static org.apache.hadoop.yarn.webapp.view.Jsons._SEP;
+import static org.apache.hadoop.yarn.webapp.view.Jsons.appendLink;
+import static org.apache.hadoop.yarn.webapp.view.Jsons.appendProgressBar;
+import static org.apache.hadoop.yarn.webapp.view.Jsons.appendSortable;
+
+import java.io.PrintWriter;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.util.Apps;
+import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
+import org.apache.hadoop.yarn.webapp.ToJSON;
+import org.apache.hadoop.yarn.webapp.view.JQueryUI.Render;
+
+import com.google.inject.Inject;
+import com.google.inject.servlet.RequestScoped;
+
+// So we only need to fetch the RM apps map once per request
+@RequestScoped
+class AppsList implements ToJSON {
+ final RequestContext rc;
+ final ConcurrentMap<ApplicationId, RMApp> apps;
+ Render rendering;
+
+ @Inject AppsList(RequestContext ctx, RMContext rmContext) {
+ rc = ctx;
+ apps = rmContext.getRMApps();
+ }
+
+ void toDataTableArrays(PrintWriter out) {
+ out.append('[');
+ boolean first = true;
+ for (RMApp app : apps.values()) {
+ if (first) {
+ first = false;
+ } else {
+ out.append(",\n");
+ }
+ String appID = Apps.toString(app.getApplicationId());
+ String trackingUrl = app.getTrackingUrl();
+ String ui = trackingUrl == null ? "UNASSIGNED" :
+ (app.getFinishTime() == 0 ? "ApplicationMaster" : "JobHistory");
+ out.append("[\"");
+ appendSortable(out, app.getApplicationId().getId());
+ appendLink(out, appID, rc.prefix(), "app", appID).append(_SEP).
+ append(escapeHtml(app.getUser().toString())).append(_SEP).
+ append(escapeHtml(app.getName().toString())).append(_SEP).
+ append(escapeHtml(app.getQueue())).append(_SEP).
+ append(app.getState().toString()).append(_SEP);
+ appendProgressBar(out, app.getProgress()).append(_SEP);
+ appendLink(out, ui, rc.prefix(),
+ trackingUrl == null ? "#" : "http://", trackingUrl).
+ append(_SEP).append(escapeJavaScript(escapeHtml(
+ app.getDiagnostics().toString()))).
+ append("\"]");
+ }
+ out.append(']');
+ }
+
+ @Override
+ public void toJSON(PrintWriter out) {
+ out.print("{\"aaData\":");
+ toDataTableArrays(out);
+ out.print("}\n");
+ }
+}
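toJSON() wraps the row arrays in the {"aaData": ...} envelope that DataTables expects from an ajax source. A self-contained sketch of the same envelope with one invented row:

    import java.io.PrintWriter;
    import java.io.StringWriter;

    public class AaDataDemo {
      public static void main(String[] args) {
        StringWriter buf = new StringWriter();
        PrintWriter out = new PrintWriter(buf);
        out.print("{\"aaData\":");  // same envelope as toJSON() above
        out.append('[');
        out.append("[\"application_1\",\"alice\",\"sort\",\"default\",\"RUNNING\"]");
        out.append(']');
        out.print("}\n");
        out.flush();
        System.out.print(buf); // {"aaData":[["application_1","alice","sort","default","RUNNING"]]}
      }
    }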
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
new file mode 100644
index 0000000..76b0f0e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -0,0 +1,179 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.inject.Inject;
+import com.google.inject.servlet.RequestScoped;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Queue;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import static org.apache.hadoop.yarn.util.StringHelper.*;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+class CapacitySchedulerPage extends RmView {
+ static final String _Q = ".ui-state-default.ui-corner-all";
+ static final float WIDTH_F = 0.8f;
+ static final String Q_END = "left:101%";
+ static final String OVER = "font-size:1px;background:rgba(255, 140, 0, 0.8)";
+ static final String UNDER = "font-size:1px;background:rgba(50, 205, 50, 0.8)";
+ static final float EPSILON = 1e-8f;
+
+ @RequestScoped
+ static class Parent {
+ Queue queue;
+ }
+
+ public static class QueueBlock extends HtmlBlock {
+ final Parent parent;
+
+ @Inject QueueBlock(Parent parent) {
+ this.parent = parent;
+ }
+
+ @Override
+ public void render(Block html) {
+ UL<Hamlet> ul = html.ul();
+ Queue parentQueue = parent.queue;
+ for (Queue queue : parentQueue.getChildQueues()) {
+ float used = queue.getUsedCapacity();
+ float set = queue.getCapacity();
+ float delta = Math.abs(set - used) + 0.001f;
+ float max = queue.getMaximumCapacity();
+ if (max < EPSILON || max > 1f) max = 1f;
+ //String absMaxPct = percent(queue.getAbsoluteMaximumCapacity());
+ LI<UL<Hamlet>> li = ul.
+ li().
+ a(_Q).$style(width(max * WIDTH_F)).
+ $title(join("used:", percent(used), " set:", percent(set),
+ " max:", percent(max))).
+ //span().$style(Q_END)._(absMaxPct)._().
+ span().$style(join(width(delta/max), ';',
+ used > set ? OVER : UNDER, ';',
+ used > set ? left(set/max) : left(used/max)))._('.')._().
+ span(".q", queue.getQueuePath().substring(5))._();
+ if (queue instanceof ParentQueue) {
+ parent.queue = queue;
+ li.
+ _(QueueBlock.class);
+ }
+ li._();
+ }
+ ul._();
+ }
+ }
+
+ static class QueuesBlock extends HtmlBlock {
+ final CapacityScheduler cs;
+ final Parent parent;
+
+ @Inject QueuesBlock(ResourceManager rm, Parent parent) {
+ cs = (CapacityScheduler) rm.getResourceScheduler();
+ this.parent = parent;
+ }
+
+ @Override
+ public void render(Block html) {
+ UL<DIV<DIV<Hamlet>>> ul = html.
+ div("#cs-wrapper.ui-widget").
+ div(".ui-widget-header.ui-corner-top").
+ _("Application Queues")._().
+ div("#cs.ui-widget-content.ui-corner-bottom").
+ ul();
+ if (cs == null) {
+ ul.
+ li().
+ a(_Q).$style(width(WIDTH_F)).
+ span().$style(Q_END)._("100% ")._().
+ span(".q", "default")._()._();
+ } else {
+ Queue root = cs.getRootQueue();
+ parent.queue = root;
+ float used = root.getUsedCapacity();
+ float set = root.getCapacity();
+ float delta = Math.abs(set - used) + 0.001f;
+ ul.
+ li().
+ a(_Q).$style(width(WIDTH_F)).
+ $title(join("used:", percent(used))).
+ span().$style(Q_END)._("100%")._().
+ span().$style(join(width(delta), ';', used > set ? OVER : UNDER,
+ ';', used > set ? left(set) : left(used)))._(".")._().
+ span(".q", "root")._().
+ _(QueueBlock.class)._();
+ }
+ ul._()._().
+ script().$type("text/javascript").
+ _("$('#cs').hide();")._()._().
+ _(AppsBlock.class);
+ }
+ }
+
+ @Override protected void postHead(Page.HTML<_> html) {
+ html.
+ style().$type("text/css").
+ _("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }",
+ "#cs ul { list-style: none }",
+ "#cs a { font-weight: normal; margin: 2px; position: relative }",
+ "#cs a span { font-weight: normal; font-size: 80% }",
+ "#cs-wrapper .ui-widget-header { padding: 0.2em 0.5em }")._().
+ script("/static/jt/jquery.jstree.js").
+ script().$type("text/javascript").
+ _("$(function() {",
+ " $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');",
+ " $('#cs').bind('loaded.jstree', function (e, data) {",
+ " data.inst.open_all(); }).",
+ " jstree({",
+ " core: { animation: 188, html_titles: true },",
+ " plugins: ['themeroller', 'html_data', 'ui'],",
+ " themeroller: { item_open: 'ui-icon-minus',",
+ " item_clsd: 'ui-icon-plus', item_leaf: 'ui-icon-gear'",
+ " }",
+ " });",
+ " $('#cs').bind('select_node.jstree', function(e, data) {",
+ " var q = $('.q', data.rslt.obj).first().text();",
+ " if (q == 'root') q = '';",
+ " $('#apps').dataTable().fnFilter(q, 3);",
+ " });",
+ " $('#cs').show();",
+ "});")._();
+ }
+
+ @Override protected Class<? extends SubView> content() {
+ return QueuesBlock.class;
+ }
+
+ static String percent(float f) {
+ return String.format("%.1f%%", f * 100);
+ }
+
+ static String width(float f) {
+ return String.format("width:%.1f%%", f * 100);
+ }
+
+ static String left(float f) {
+ return String.format("left:%.1f%%", f * 100);
+ }
+}
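The three helpers at the bottom all format a 0..1 fraction as a percentage string, differing only in the CSS prefix. A quick check of their output (the demo class is hypothetical and assumes it sits in the same package, since the helpers are package-private):

    package org.apache.hadoop.yarn.server.resourcemanager.webapp;

    public class FormatDemo {
      public static void main(String[] args) {
        System.out.println(CapacitySchedulerPage.percent(0.425f)); // 42.5%
        System.out.println(CapacitySchedulerPage.width(0.8f));     // width:80.0%
        System.out.println(CapacitySchedulerPage.left(0.125f));    // left:12.5%
      }
    }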
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
new file mode 100644
index 0000000..a0a3030
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
@@ -0,0 +1,37 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+class DefaultSchedulerPage extends RmView {
+
+ static class QueueBlock extends HtmlBlock {
+ @Override public void render(Block html) {
+ html.h2("Under construction");
+ }
+ }
+
+ @Override protected Class<? extends SubView> content() {
+ return QueueBlock.class;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/InfoPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/InfoPage.java
new file mode 100644
index 0000000..687c249
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/InfoPage.java
@@ -0,0 +1,33 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.InfoBlock;
+
+public class InfoPage extends RmView {
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ }
+
+ @Override protected Class<? extends SubView> content() {
+ return InfoBlock.class;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
new file mode 100644
index 0000000..cf474b1
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
@@ -0,0 +1,42 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+public class NavBlock extends HtmlBlock {
+
+ @Override public void render(Block html) {
+ html.
+ div("#nav").
+ h3("Cluster").
+ ul().
+ li().a(url("cluster"), "About")._().
+ li().a(url("nodes"), "Nodes")._().
+ li().a(url("apps"), "Applications")._().
+ li().a(url("scheduler"), "Scheduler")._()._().
+ h3("Tools").
+ ul().
+ li().a("/conf", "Configuration")._().
+ li().a("/logs", "Local logs")._().
+ li().a("/stacks", "Server stacks")._().
+ li().a("/metrics", "Server metrics")._()._()._().
+ div("#themeswitcher")._();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
new file mode 100644
index 0000000..4231c4c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -0,0 +1,106 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import com.google.inject.Inject;
+
+class NodesPage extends RmView {
+
+ static class NodesBlock extends HtmlBlock {
+ final RMContext rmContext;
+
+ @Inject
+ NodesBlock(RMContext context, ViewContext ctx) {
+ super(ctx);
+ this.rmContext = context;
+ }
+
+ @Override
+ protected void render(Block html) {
+ TBODY<TABLE<Hamlet>> tbody = html.table("#nodes").
+ thead().
+ tr().
+ th(".rack", "Rack").
+ th(".nodeid", "Node ID").
+ th(".host", "Host").
+ th(".healthStatus", "Health-status").
+ th(".lastHealthUpdate", "Last health-update").
+ th(".healthReport", "Health-report").
+ th(".containers", "Containers").
+// th(".mem", "Mem Used (MB)").
+// th(".mem", "Mem Avail (MB)").
+ _()._().
+ tbody();
+ for (RMNode ni : this.rmContext.getRMNodes().values()) {
+ NodeHealthStatus health = ni.getNodeHealthStatus();
+ tbody.tr().
+ td(ni.getRackName()).
+ td(ni.getNodeID().toString()).
+ td().a("http://" + ni.getHttpAddress(), ni.getHttpAddress())._().
+ td(health.getIsNodeHealthy() ? "Healthy" : "Unhealthy").
+ td(Times.format(health.getLastHealthReportTime())).
+ td(String.valueOf(health.getHealthReport())).
+ // TODO: acm: refactor2 FIXME
+ //td(String.valueOf(ni.getNumContainers())).
+ // TODO: FIXME Vinodkv
+// td(String.valueOf(ni.getUsedResource().getMemory())).
+// td(String.valueOf(ni.getAvailableResource().getMemory())).
+ _();
+ }
+ tbody._()._();
+ }
+ }
+
+ @Override protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ setTitle("Nodes of the cluster");
+ set(DATATABLES_ID, "nodes");
+ set(initID(DATATABLES, "nodes"), nodesTableInit());
+ setTableStyles(html, "nodes", ".healthStatus {width:10em}",
+ ".healthReport {width:10em}");
+ }
+
+ @Override protected Class<? extends SubView> content() {
+ return NodesBlock.class;
+ }
+
+ private String nodesTableInit() {
+ return tableInit().
+ // rack, nodeid, host, healthStatus, health update ts, health report,
+ // containers, memused, memavail
+ append(", aoColumns:[null, null, null, null, null, null, ").
+ append("{bSearchable:false},{bSearchable:false},{bSearchable:false}]}").
+ toString();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
new file mode 100644
index 0000000..19a43d0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
@@ -0,0 +1,54 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
+
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.webapp.WebApp;
+
+/**
+ * The RM webapp
+ */
+public class RMWebApp extends WebApp {
+ static final String APP_ID = "app.id";
+ static final String QUEUE_NAME = "queue.name";
+
+ private final ResourceManager rm;
+
+ public RMWebApp(ResourceManager rm) {
+ this.rm = rm;
+ }
+
+ @Override
+ public void setup() {
+ if (rm != null) {
+ bind(ResourceManager.class).toInstance(rm);
+ bind(RMContext.class).toInstance(rm.getRMContext());
+ }
+ route("/", RmController.class);
+ route("/nodes", RmController.class, "nodes");
+ route("/apps", RmController.class);
+ route("/cluster", RmController.class, "info");
+ route(pajoin("/app", APP_ID), RmController.class, "app");
+ route("/scheduler", RmController.class, "scheduler");
+ route(pajoin("/queue", QUEUE_NAME), RmController.class, "queue");
+ }
+}
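setup() wires each URL path to an RmController action; pajoin("/app", APP_ID) makes the application id a path parameter that the controller later reads back with $(APP_ID). A hedged startup sketch, assuming the WebApps builder and WebApp.joinThread() from org.apache.hadoop.yarn.webapp (the name "cluster" and port 8088 are arbitrary):

    import org.apache.hadoop.yarn.webapp.WebApp;
    import org.apache.hadoop.yarn.webapp.WebApps;

    public class RMWebAppDemo {
      public static void main(String[] args) {
        // A live ResourceManager would normally be passed in; null keeps the
        // sketch self-contained because setup() guards its bindings on rm != null.
        WebApp app = WebApps.$for("cluster").at(8088).start(new RMWebApp(null));
        app.joinThread(); // block until the embedded web server stops
      }
    }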
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
new file mode 100644
index 0000000..cbbf8af
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
@@ -0,0 +1,138 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp.APP_ID;
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp.QUEUE_NAME;
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.util.Apps;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
+import org.apache.hadoop.yarn.webapp.Controller;
+import org.apache.hadoop.yarn.webapp.ResponseInfo;
+
+import com.google.inject.Inject;
+
+// Do NOT rename/refactor this to RMView as it will wreak havoc
+// on Mac OS HFS as it's case-insensitive!
+public class RmController extends Controller {
+ @Inject RmController(RequestContext ctx) { super(ctx); }
+
+ @Override public void index() {
+ setTitle("Applications");
+ }
+
+ public void info() {
+ setTitle("About the Cluster");
+ long ts = ResourceManager.clusterTimeStamp;
+ ResourceManager rm = getInstance(ResourceManager.class);
+ info("Cluster overview").
+ _("Cluster ID:", ts).
+ _("ResourceManager state:", rm.getServiceState()).
+ _("ResourceManager started on:", Times.format(ts)).
+ _("ResourceManager version:", YarnVersionInfo.getBuildVersion() +
+ " on " + YarnVersionInfo.getDate()).
+ _("Hadoop version:", VersionInfo.getBuildVersion() +
+ " on " + VersionInfo.getDate());
+ render(InfoPage.class);
+ }
+
+ public void app() {
+ String aid = $(APP_ID);
+ if (aid.isEmpty()) {
+ setStatus(response().SC_BAD_REQUEST);
+ setTitle("Bad request: requires application ID");
+ return;
+ }
+ ApplicationId appID = Apps.toAppID(aid);
+ RMContext context = getInstance(RMContext.class);
+ RMApp app = context.getRMApps().get(appID);
+ if (app == null) {
+ // TODO: handle redirect to jobhistory server
+ setStatus(response().SC_NOT_FOUND);
+ setTitle("Application not found: "+ aid);
+ return;
+ }
+ setTitle(join("Application ", aid));
+ String trackingUrl = app.getTrackingUrl();
+ String ui = trackingUrl == null ? "UNASSIGNED" :
+ (app.getFinishTime() == 0 ? "ApplicationMaster" : "JobHistory");
+
+ ResponseInfo info = info("Application Overview").
+ _("User:", app.getUser()).
+ _("Name:", app.getName()).
+ _("State:", app.getState()).
+ _("Started:", Times.format(app.getStartTime())).
+ _("Elapsed:", StringUtils.formatTime(
+ Times.elapsed(app.getStartTime(), app.getFinishTime()))).
+ _("Tracking URL:", trackingUrl == null ? "#" :
+ join("http://", trackingUrl), ui).
+ _("Diagnostics:", app.getDiagnostics());
+ Container masterContainer = app.getCurrentAppAttempt()
+ .getMasterContainer();
+ if (masterContainer != null) {
+ String url = join("http://", masterContainer.getNodeHttpAddress(),
+ "/yarn", "/containerlogs/",
+ ConverterUtils.toString(masterContainer.getId()));
+ info._("AM container logs:", url, url);
+ } else {
+ info._("AM container logs:", "AM not yet registered with RM");
+ }
+ render(InfoPage.class);
+ }
+
+ public void nodes() {
+ render(NodesPage.class);
+ }
+
+ public void scheduler() {
+ ResourceManager rm = getInstance(ResourceManager.class);
+ ResourceScheduler rs = rm.getResourceScheduler();
+ if (rs == null || rs instanceof CapacityScheduler) {
+ setTitle("Capacity Scheduler");
+ render(CapacitySchedulerPage.class);
+ return;
+ }
+ setTitle("Default Scheduler");
+ render(DefaultSchedulerPage.class);
+ }
+
+ public void queue() {
+ setTitle(join("Queue ", get(QUEUE_NAME, "unknown")));
+ }
+
+ public void submit() {
+ setTitle("Application Submission Not Allowed");
+ }
+
+ public void json() {
+ renderJSON(AppsList.class);
+ }
+}
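app() composes the AM container log link by straight concatenation (StringHelper.join is assumed here to append its arguments with no separator, which matches how the pieces already carry their own slashes):

    public class AmLogUrlDemo {
      public static void main(String[] args) {
        String nodeHttpAddress = "nm-host:9999";            // invented
        String containerId = "container_1_0001_01_000001";  // invented
        String url = "http://" + nodeHttpAddress + "/yarn"
            + "/containerlogs/" + containerId;
        System.out.println(url);
        // http://nm-host:9999/yarn/containerlogs/container_1_0001_01_000001
      }
    }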
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
new file mode 100644
index 0000000..a5337b8
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
@@ -0,0 +1,81 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+
+// Do NOT rename/refactor this to RMView as it will wreak havoc
+// on Mac OS HFS
+public class RmView extends TwoColumnLayout {
+ static final int MAX_DISPLAY_ROWS = 100; // direct table rendering
+ static final int MAX_FAST_ROWS = 1000; // inline js array
+ static final int MAX_INLINE_ROWS = 2000; // ajax load
+
+ @Override
+ protected void preHead(Page.HTML<_> html) {
+ commonPreHead(html);
+ set(DATATABLES_ID, "apps");
+ set(initID(DATATABLES, "apps"), appsTableInit());
+ setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
+ }
+
+ protected void commonPreHead(Page.HTML<_> html) {
+ //html.meta_http("refresh", "20");
+ set(ACCORDION_ID, "nav");
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
+ set(THEMESWITCHER_ID, "themeswitcher");
+ }
+
+ @Override
+ protected Class<? extends SubView> nav() {
+ return NavBlock.class;
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return AppsBlock.class;
+ }
+
+ private String appsTableInit() {
+ AppsList list = getInstance(AppsList.class);
+ // id, user, name, queue, state, progress, ui, note
+ StringBuilder init = tableInit().
+ append(", aoColumns:[{sType:'title-numeric'}, null, null, null, null,").
+ append("{sType:'title-numeric', bSearchable:false}, null, null]");
+ String rows = $("rowlimit");
+ int rowLimit = rows.isEmpty() ? MAX_DISPLAY_ROWS : Integer.parseInt(rows);
+ if (list.apps.size() < rowLimit) {
+ list.rendering = Render.HTML;
+ return init.append('}').toString();
+ }
+ if (list.apps.size() > MAX_FAST_ROWS) {
+ tableInitProgress(init, list.apps.size() * 6);
+ }
+ if (list.apps.size() > MAX_INLINE_ROWS) {
+ list.rendering = Render.JS_LOAD;
+ return init.append(", sAjaxSource:'").append(url("apps", "json")).
+ append("'}").toString();
+ }
+ list.rendering = Render.JS_ARRAY;
+ return init.append(", aaData:appsData}").toString();
+ }
+}
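appsTableInit() picks one of three rendering modes from the app count: few enough rows are rendered straight into the page, mid-sized sets are embedded as an inline JS array, and anything past MAX_INLINE_ROWS is fetched over ajax. The same ladder restated standalone (the local Render enum just mirrors JQueryUI.Render):

    public class RenderChoice {
      enum Render { HTML, JS_ARRAY, JS_LOAD }

      static Render choose(int apps, int rowLimit) {
        if (apps < rowLimit) return Render.HTML;   // render rows directly
        if (apps > 2000)     return Render.JS_LOAD; // MAX_INLINE_ROWS: ajax source
        return Render.JS_ARRAY;                     // inline js data array
      }

      public static void main(String[] args) {
        System.out.println(choose(50, 100));   // HTML
        System.out.println(choose(1500, 100)); // JS_ARRAY
        System.out.println(choose(5000, 100)); // JS_LOAD
      }
    }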
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/RMAdminProtocol.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/RMAdminProtocol.proto
new file mode 100644
index 0000000..12c8fc5
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/RMAdminProtocol.proto
@@ -0,0 +1,14 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "RMAdminProtocol";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "yarn_server_resourcemanager_service_protos.proto";
+
+service RMAdminProtocolService {
+ rpc refreshQueues(RefreshQueuesRequestProto) returns (RefreshQueuesResponseProto);
+ rpc refreshNodes(RefreshNodesRequestProto) returns (RefreshNodesResponseProto);
+ rpc refreshSuperUserGroupsConfiguration(RefreshSuperUserGroupsConfigurationRequestProto) returns (RefreshSuperUserGroupsConfigurationResponseProto);
+ rpc refreshUserToGroupsMappings(RefreshUserToGroupsMappingsRequestProto) returns (RefreshUserToGroupsMappingsResponseProto);
+ rpc refreshAdminAcls(RefreshAdminAclsRequestProto) returns (RefreshAdminAclsResponseProto);
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_service_protos.proto b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_service_protos.proto
new file mode 100644
index 0000000..c6233e9
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_service_protos.proto
@@ -0,0 +1,30 @@
+option java_package = "org.apache.hadoop.yarn.proto";
+option java_outer_classname = "YarnServerResourceManagerServiceProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+
+message RefreshQueuesRequestProto {
+}
+message RefreshQueuesResponseProto {
+}
+
+message RefreshNodesRequestProto {
+}
+message RefreshNodesResponseProto {
+}
+
+message RefreshSuperUserGroupsConfigurationRequestProto {
+}
+message RefreshSuperUserGroupsConfigurationResponseProto {
+}
+
+message RefreshUserToGroupsMappingsRequestProto {
+}
+message RefreshUserToGroupsMappingsResponseProto {
+}
+
+message RefreshAdminAclsRequestProto {
+}
+message RefreshAdminAclsResponseProto {
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
new file mode 100644
index 0000000..a77d061
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
@@ -0,0 +1 @@
+org.apache.hadoop.yarn.security.admin.AdminSecurityInfo
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
new file mode 100644
index 0000000..f6e2b0c
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
@@ -0,0 +1,53 @@
+<configuration>
+
+ <property>
+ <name>yarn.capacity-scheduler.maximum-applications</name>
+ <value>10000</value>
+ </property>
+
+ <property>
+ <name>yarn.capacity-scheduler.root.queues</name>
+ <value>default</value>
+ </property>
+
+ <property>
+ <name>yarn.capacity-scheduler.root.capacity</name>
+ <value>100</value>
+ </property>
+
+ <property>
+ <name>yarn.capacity-scheduler.root.acl_administer_queues</name>
+ <value>*</value>
+ </property>
+
+ <property>
+ <name>yarn.capacity-scheduler.root.default.capacity</name>
+ <value>100</value>
+ </property>
+
+ <property>
+ <name>yarn.capacity-scheduler.root.default.user-limit-factor</name>
+ <value>1</value>
+ </property>
+
+ <property>
+ <name>yarn.capacity-scheduler.root.default.maximum-capacity</name>
+ <value>-1</value>
+ </property>
+
+ <property>
+ <name>yarn.capacity-scheduler.root.default.state</name>
+ <value>RUNNING</value>
+ </property>
+
+ <property>
+ <name>yarn.capacity-scheduler.root.default.acl_submit_jobs</name>
+ <value>*</value>
+ </property>
+
+ <property>
+ <name>yarn.capacity-scheduler.root.default.acl_administer_jobs</name>
+ <value>*</value>
+ </property>
+
+</configuration>
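A queue's capacity value is a percentage of its parent, so the absolute share is the product of the fractions down the tree; with the single default queue above that product is trivially 100%, and a maximum-capacity of -1 leaves the cap disabled. Illustrative arithmetic only:

    public class CapacityMath {
      public static void main(String[] args) {
        float root = 100f / 100f; // yarn.capacity-scheduler.root.capacity
        float dflt = 100f / 100f; // root.default.capacity, relative to its parent
        System.out.printf("default absolute capacity = %.0f%%%n", root * dflt * 100);
        // default absolute capacity = 100%
      }
    }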
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
new file mode 100644
index 0000000..423b519
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
@@ -0,0 +1,407 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.Task.State;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
+import org.apache.hadoop.yarn.util.Records;
+
+@Private
+public class Application {
+ private static final Log LOG = LogFactory.getLog(Application.class);
+
+ private AtomicInteger taskCounter = new AtomicInteger(0);
+
+ private AtomicInteger numAttempts = new AtomicInteger(0);
+ final private String user;
+ final private String queue;
+ final private ApplicationId applicationId;
+ final private ApplicationAttemptId applicationAttemptId;
+ final private ResourceManager resourceManager;
+ private final static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ final private Map<Priority, Resource> requestSpec =
+ new TreeMap<Priority, Resource>(
+ new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator());
+
+ final private Map<Priority, Map<String, ResourceRequest>> requests =
+ new TreeMap<Priority, Map<String, ResourceRequest>>(
+ new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator());
+
+ final Map<Priority, Set<Task>> tasks =
+ new TreeMap<Priority, Set<Task>>(
+ new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator());
+
+ final private Set<ResourceRequest> ask =
+ new TreeSet<ResourceRequest>(
+ new org.apache.hadoop.yarn.util.BuilderUtils.ResourceRequestComparator());
+
+ final private Map<String, NodeManager> nodes =
+ new HashMap<String, NodeManager>();
+
+ Resource used = recordFactory.newRecordInstance(Resource.class);
+
+ public Application(String user, ResourceManager resourceManager) {
+ this(user, "default", resourceManager);
+ }
+
+ public Application(String user, String queue, ResourceManager resourceManager) {
+ this.user = user;
+ this.queue = queue;
+ this.resourceManager = resourceManager;
+ this.applicationId =
+ this.resourceManager.getClientRMService().getNewApplicationId();
+ this.applicationAttemptId = Records.newRecord(ApplicationAttemptId.class);
+ this.applicationAttemptId.setApplicationId(this.applicationId);
+ this.applicationAttemptId.setAttemptId(this.numAttempts.getAndIncrement());
+ }
+
+ public String getUser() {
+ return user;
+ }
+
+ public String getQueue() {
+ return queue;
+ }
+
+ public ApplicationId getApplicationId() {
+ return applicationId;
+ }
+
+ public static String resolve(String hostName) {
+ return NetworkTopology.DEFAULT_RACK;
+ }
+
+ public int getNextTaskId() {
+ return taskCounter.incrementAndGet();
+ }
+
+ public Resource getUsedResources() {
+ return used;
+ }
+
+ public synchronized void submit() throws IOException {
+ ApplicationSubmissionContext context = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
+ context.setApplicationId(this.applicationId);
+ context.setUser(this.user);
+ context.setQueue(this.queue);
+ SubmitApplicationRequest request = recordFactory
+ .newRecordInstance(SubmitApplicationRequest.class);
+ request.setApplicationSubmissionContext(context);
+ resourceManager.getClientRMService().submitApplication(request);
+ }
+
+ public synchronized void addResourceRequestSpec(
+ Priority priority, Resource capability) {
+ Resource currentSpec = requestSpec.put(priority, capability);
+ if (currentSpec != null) {
+ throw new IllegalStateException("Resource spec already exists for " +
+ "priority " + priority.getPriority() + " - " + currentSpec.getMemory());
+ }
+ }
+
+ public synchronized void addNodeManager(String host,
+ int containerManagerPort, NodeManager nodeManager) {
+ nodes.put(host + ":" + containerManagerPort, nodeManager);
+ }
+
+ private synchronized NodeManager getNodeManager(String host) {
+ return nodes.get(host);
+ }
+
+ public synchronized void addTask(Task task) {
+ Priority priority = task.getPriority();
+ Map<String, ResourceRequest> requests = this.requests.get(priority);
+ if (requests == null) {
+ requests = new HashMap<String, ResourceRequest>();
+ this.requests.put(priority, requests);
+ LOG.info("DEBUG --- Added" +
+ " priority=" + priority +
+ " application=" + applicationId);
+ }
+
+ final Resource capability = requestSpec.get(priority);
+
+ // Note down the task
+ Set<Task> tasks = this.tasks.get(priority);
+ if (tasks == null) {
+ tasks = new HashSet<Task>();
+ this.tasks.put(priority, tasks);
+ }
+ tasks.add(task);
+
+ LOG.info("Added task " + task.getTaskId() + " to application " +
+ applicationId + " at priority " + priority);
+
+ LOG.info("DEBUG --- addTask:" +
+ " application=" + applicationId +
+ " #asks=" + ask.size());
+
+ // Create resource requests
+ for (String host : task.getHosts()) {
+ // Data-local
+ addResourceRequest(priority, requests, host, capability);
+ }
+
+ // Rack-local
+ for (String rack : task.getRacks()) {
+ addResourceRequest(priority, requests, rack, capability);
+ }
+
+ // Off-switch
+ addResourceRequest(priority, requests,
+ org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode.ANY,
+ capability);
+ }
+
+ public synchronized void finishTask(Task task) throws IOException {
+ Set<Task> tasks = this.tasks.get(task.getPriority());
+ if (!tasks.remove(task)) {
+ throw new IllegalStateException(
+ "Finishing unknown task " + task.getTaskId() +
+ " from application " + applicationId);
+ }
+
+ NodeManager nodeManager = task.getNodeManager();
+ ContainerId containerId = task.getContainerId();
+ task.stop();
+ StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
+ stopRequest.setContainerId(containerId);
+ nodeManager.stopContainer(stopRequest);
+
+ Resources.subtractFrom(used, requestSpec.get(task.getPriority()));
+
+ LOG.info("Finished task " + task.getTaskId() +
+ " of application " + applicationId +
+ " on node " + nodeManager.getHostName() +
+ ", currently using " + used + " resources");
+ }
+
+ private synchronized void addResourceRequest(
+ Priority priority, Map<String, ResourceRequest> requests,
+ String resourceName, Resource capability) {
+ ResourceRequest request = requests.get(resourceName);
+ if (request == null) {
+ request =
+ org.apache.hadoop.yarn.util.BuilderUtils.newResourceRequest(
+ priority, resourceName, capability, 1);
+ requests.put(resourceName, request);
+ } else {
+ request.setNumContainers(request.getNumContainers() + 1);
+ }
+
+ // Note this down for next interaction with ResourceManager
+ ask.remove(request);
+ ask.add(
+ org.apache.hadoop.yarn.util.BuilderUtils.newResourceRequest(
+ request)); // clone to ensure the RM doesn't manipulate the same obj
+
+ LOG.info("DEBUG --- addResourceRequest:" +
+ " applicationId=" + applicationId.getId() +
+ " priority=" + priority.getPriority() +
+ " resourceName=" + resourceName +
+ " capability=" + capability +
+ " numContainers=" + request.getNumContainers() +
+ " #asks=" + ask.size());
+ }
+
+ public synchronized List<Container> getResources() throws IOException {
+ LOG.info("DEBUG --- getResources begin:" +
+ " application=" + applicationId +
+ " #ask=" + ask.size());
+ for (ResourceRequest request : ask) {
+ LOG.info("DEBUG --- getResources:" +
+ " application=" + applicationId +
+ " ask-request=" + request);
+ }
+
+ // Get resources from the ResourceManager
+ resourceManager.getResourceScheduler().allocate(applicationAttemptId,
+ new ArrayList<ResourceRequest>(ask), new ArrayList<ContainerId>());
+ System.out.println("-=======" + applicationAttemptId);
+ System.out.println("----------" + resourceManager.getRMContext().getRMApps()
+ .get(applicationId).getRMAppAttempt(applicationAttemptId));
+
+ List<Container> containers = new ArrayList<Container>(); // empty, not null: the log below calls containers.size()
+ // TODO: Fix
+// resourceManager.getRMContext().getRMApps()
+// .get(applicationId).getRMAppAttempt(applicationAttemptId)
+// .pullNewlyAllocatedContainers();
+
+ // Clear state for next interaction with ResourceManager
+ ask.clear();
+
+ LOG.info("DEBUG --- getResources() for " + applicationId + ":" +
+ " ask=" + ask.size() +
+ " recieved=" + containers.size());
+
+ return containers;
+ }
+
+ public synchronized void assign(List<Container> containers)
+ throws IOException {
+
+ int numContainers = containers.size();
+ // Schedule in priority order
+ for (Priority priority : requests.keySet()) {
+ assign(priority, NodeType.NODE_LOCAL, containers);
+ assign(priority, NodeType.RACK_LOCAL, containers);
+ assign(priority, NodeType.OFF_SWITCH, containers);
+
+ if (containers.isEmpty()) {
+ break;
+ }
+ }
+
+ int assignedContainers = numContainers - containers.size();
+ LOG.info("Application " + applicationId + " assigned " +
+ assignedContainers + "/" + numContainers);
+ }
+
+ public synchronized void schedule() throws IOException {
+ assign(getResources());
+ }
+
+ private synchronized void assign(Priority priority, NodeType type,
+ List<Container> containers) throws IOException {
+ for (Iterator<Container> i=containers.iterator(); i.hasNext();) {
+ Container container = i.next();
+ String host = container.getNodeId().toString();
+
+ if (Resources.equals(requestSpec.get(priority), container.getResource())) {
+ // See which task can use this container
+ for (Iterator<Task> t=tasks.get(priority).iterator(); t.hasNext();) {
+ Task task = t.next();
+ if (task.getState() == State.PENDING && task.canSchedule(type, host)) {
+ NodeManager nodeManager = getNodeManager(host);
+
+ task.start(nodeManager, container.getId());
+ i.remove();
+
+ // Track application resource usage
+ Resources.addTo(used, container.getResource());
+
+ LOG.info("Assigned container (" + container + ") of type " + type +
+ " to task " + task.getTaskId() + " at priority " + priority +
+ " on node " + nodeManager.getHostName() +
+ ", currently using " + used + " resources");
+
+ // Update resource requests
+ updateResourceRequests(requests.get(priority), type, task);
+
+ // Launch the container
+ StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
+ startRequest.setContainerLaunchContext(createCLC(container));
+ nodeManager.startContainer(startRequest);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ private void updateResourceRequests(Map<String, ResourceRequest> requests,
+ NodeType type, Task task) {
+ if (type == NodeType.NODE_LOCAL) {
+ for (String host : task.getHosts()) {
+ LOG.info("DEBUG --- updateResourceRequests:" +
+ " application=" + applicationId +
+ " type=" + type +
+ " host=" + host +
+ " request=" + ((requests == null) ? "null" : requests.get(host)));
+ updateResourceRequest(requests.get(host));
+ }
+ }
+
+ if (type == NodeType.NODE_LOCAL || type == NodeType.RACK_LOCAL) {
+ for (String rack : task.getRacks()) {
+ LOG.info("DEBUG --- updateResourceRequests:" +
+ " application=" + applicationId +
+ " type=" + type +
+ " rack=" + rack +
+ " request=" + ((requests == null) ? "null" : requests.get(rack)));
+ updateResourceRequest(requests.get(rack));
+ }
+ }
+
+ updateResourceRequest(
+ requests.get(
+ org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode.ANY)
+ );
+
+ LOG.info("DEBUG --- updateResourceRequests:" +
+ " application=" + applicationId +
+ " #asks=" + ask.size());
+ }
+
+ private void updateResourceRequest(ResourceRequest request) {
+ request.setNumContainers(request.getNumContainers() - 1);
+
+ // Note this for next interaction with ResourceManager
+ ask.remove(request);
+ ask.add(
+ org.apache.hadoop.yarn.util.BuilderUtils.newResourceRequest(
+ request)); // clone to ensure the RM doesn't manipulate the same obj
+
+ LOG.info("DEBUG --- updateResourceRequest:" +
+ " application=" + applicationId +
+ " request=" + request);
+ }
+
+ private ContainerLaunchContext createCLC(Container container) {
+ ContainerLaunchContext clc = recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ clc.setContainerId(container.getId());
+ clc.setUser(this.user);
+ clc.setResource(container.getResource());
+ return clc;
+ }
+}
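Both addResourceRequest() and updateResourceRequest() remove the request and re-add a clone so the RM never shares a mutable object with this test application. The removal works because the TreeSet's comparator, not object identity, decides membership. A minimal analogue with int arrays keyed on their first element:

    import java.util.Comparator;
    import java.util.TreeSet;

    public class TreeSetReplaceDemo {
      public static void main(String[] args) {
        TreeSet<int[]> set = new TreeSet<int[]>(new Comparator<int[]>() {
          @Override public int compare(int[] a, int[] b) { return a[0] - b[0]; }
        });
        set.add(new int[]{1, 10});        // key 1, payload 10
        int[] updated = new int[]{1, 99}; // same key, refreshed payload
        set.remove(updated);              // comparator equality evicts the stale copy
        set.add(updated);
        System.out.println(set.first()[1]); // 99
      }
    }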
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
new file mode 100644
index 0000000..8efe6f3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -0,0 +1,142 @@
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.util.Records;
+
+public class MockAM {
+
+ private volatile int responseId = 0;
+ private final ApplicationAttemptId attemptId;
+ private final RMContext context;
+ private final AMRMProtocol amRMProtocol;
+
+ private final List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
+ private final List<ContainerId> releases = new ArrayList<ContainerId>();
+
+ MockAM(RMContext context, AMRMProtocol amRMProtocol,
+ ApplicationAttemptId attemptId) {
+ this.context = context;
+ this.amRMProtocol = amRMProtocol;
+ this.attemptId = attemptId;
+ }
+
+ public void waitForState(RMAppAttemptState finalState) throws Exception {
+ RMApp app = context.getRMApps().get(attemptId.getApplicationId());
+ RMAppAttempt attempt = app.getRMAppAttempt(attemptId);
+ int timeoutSecs = 0;
+ while (!finalState.equals(attempt.getAppAttemptState())
+ && timeoutSecs++ < 20) {
+ System.out
+ .println("AppAttempt State is : " + attempt.getAppAttemptState()
+ + " Waiting for state : " + finalState);
+ Thread.sleep(500);
+ }
+ System.out.println("AppAttempt State is : " + attempt.getAppAttemptState());
+ Assert.assertEquals("AppAttempt state is not correct (timedout)",
+ finalState, attempt.getAppAttemptState());
+ }
+
+ public void registerAppAttempt() throws Exception {
+ waitForState(RMAppAttemptState.LAUNCHED);
+ responseId = 0;
+ RegisterApplicationMasterRequest req = Records.newRecord(RegisterApplicationMasterRequest.class);
+ req.setApplicationAttemptId(attemptId);
+ req.setHost("");
+ req.setRpcPort(1);
+ req.setTrackingUrl("");
+ amRMProtocol.registerApplicationMaster(req);
+ }
+
+ public void addRequests(String[] hosts, int memory, int priority,
+ int containers) throws Exception {
+ requests.addAll(createReq(hosts, memory, priority, containers));
+ }
+
+ public AMResponse schedule() throws Exception {
+ AMResponse response = allocate(requests, releases);
+ requests.clear();
+ releases.clear();
+ return response;
+ }
+
+ public AMResponse allocate(
+ String host, int memory, int numContainers,
+ List<ContainerId> releases) throws Exception {
+    List<ResourceRequest> reqs = createReq(new String[]{host}, memory, 1, numContainers);
+ return allocate(reqs, releases);
+ }
+
+ public List<ResourceRequest> createReq(String[] hosts, int memory, int priority,
+ int containers) throws Exception {
+ List<ResourceRequest> reqs = new ArrayList<ResourceRequest>();
+ for (String host : hosts) {
+ ResourceRequest hostReq = createResourceReq(host, memory, priority,
+ containers);
+ reqs.add(hostReq);
+ ResourceRequest rackReq = createResourceReq("default-rack", memory,
+ priority, containers);
+ reqs.add(rackReq);
+ }
+
+ ResourceRequest offRackReq = createResourceReq("*", memory, priority,
+ containers);
+ reqs.add(offRackReq);
+ return reqs;
+  }
+
+ public ResourceRequest createResourceReq(String resource, int memory, int priority,
+ int containers) throws Exception {
+ ResourceRequest req = Records.newRecord(ResourceRequest.class);
+ req.setHostName(resource);
+ req.setNumContainers(containers);
+ Priority pri = Records.newRecord(Priority.class);
+    pri.setPriority(priority);
+ req.setPriority(pri);
+ Resource capability = Records.newRecord(Resource.class);
+ capability.setMemory(memory);
+ req.setCapability(capability);
+ return req;
+ }
+
+ public AMResponse allocate(
+ List<ResourceRequest> resourceRequest, List<ContainerId> releases)
+ throws Exception {
+ AllocateRequest req = Records.newRecord(AllocateRequest.class);
+ req.setResponseId(++responseId);
+ req.setApplicationAttemptId(attemptId);
+ req.addAllAsks(resourceRequest);
+ req.addAllReleases(releases);
+ AllocateResponse resp = amRMProtocol.allocate(req);
+ return resp.getAMResponse();
+ }
+
+ public void unregisterAppAttempt() throws Exception {
+ waitForState(RMAppAttemptState.RUNNING);
+ FinishApplicationMasterRequest req = Records.newRecord(FinishApplicationMasterRequest.class);
+ req.setAppAttemptId(attemptId);
+ req.setDiagnostics("");
+ req.setFinalState("");
+ req.setTrackingUrl("");
+ amRMProtocol.finishApplicationMaster(req);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
new file mode 100644
index 0000000..aa6dd51
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
@@ -0,0 +1,82 @@
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.util.Records;
+
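+/**
+ * Mock NodeManager that talks directly to the ResourceTrackerService,
+ * registering a node with the given "host:port" id and memory and sending
+ * heartbeats with optional container statuses. Typical use, as in TestRM:
+ *
+ *   MockNM nm1 = rm.registerNode("h1:1234", 5120);
+ *   nm1.nodeHeartbeat(true); // kick the scheduler
+ */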
+public class MockNM {
+
+ private int responseId;
+ private NodeId nodeId;
+ private final String nodeIdStr;
+ private final int memory;
+ private final ResourceTrackerService resourceTracker;
+
+ MockNM(String nodeIdStr, int memory, ResourceTrackerService resourceTracker) {
+ this.nodeIdStr = nodeIdStr;
+ this.memory = memory;
+ this.resourceTracker = resourceTracker;
+ }
+
+ public NodeId getNodeId() {
+ return nodeId;
+ }
+
+ public void containerStatus(Container container) throws Exception {
+ Map<ApplicationId, List<Container>> conts = new HashMap<ApplicationId, List<Container>>();
+    conts.put(container.getId().getAppId(), Arrays.asList(container));
+ nodeHeartbeat(conts, true);
+ }
+
+ public NodeId registerNode() throws Exception {
+ String[] splits = nodeIdStr.split(":");
+ nodeId = Records.newRecord(NodeId.class);
+ nodeId.setHost(splits[0]);
+ nodeId.setPort(Integer.parseInt(splits[1]));
+ RegisterNodeManagerRequest req = Records.newRecord(
+ RegisterNodeManagerRequest.class);
+ req.setNodeId(nodeId);
+ req.setHttpPort(2);
+ Resource resource = Records.newRecord(Resource.class);
+ resource.setMemory(memory);
+ req.setResource(resource);
+ resourceTracker.registerNodeManager(req);
+ return nodeId;
+ }
+
+ public HeartbeatResponse nodeHeartbeat(boolean b) throws Exception {
+ return nodeHeartbeat(new HashMap<ApplicationId, List<Container>>(), b);
+ }
+
+ public HeartbeatResponse nodeHeartbeat(Map<ApplicationId,
+ List<Container>> conts, boolean isHealthy) throws Exception {
+ NodeHeartbeatRequest req = Records.newRecord(NodeHeartbeatRequest.class);
+ NodeStatus status = Records.newRecord(NodeStatus.class);
+ status.setNodeId(nodeId);
+ for (Map.Entry<ApplicationId, List<Container>> entry : conts.entrySet()) {
+ status.setContainers(entry.getKey(), entry.getValue());
+ }
+ NodeHealthStatus healthStatus = Records.newRecord(NodeHealthStatus.class);
+ healthStatus.setHealthReport("");
+ healthStatus.setIsNodeHealthy(isHealthy);
+ healthStatus.setLastHealthReportTime(1);
+ status.setNodeHealthStatus(healthStatus);
+ status.setResponseId(++responseId);
+ req.setNodeStatus(status);
+ return resourceTracker.nodeHeartbeat(req).getHeartbeatResponse();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
new file mode 100644
index 0000000..a7df022
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
@@ -0,0 +1,173 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.List;
+
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Test helper to generate mock nodes
+ */
+public class MockNodes {
+ private static int NODE_ID = 0;
+ private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
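+  /**
+   * Builds racks * nodesPerRack mock nodes, each advertising the given
+   * per-node capability.
+   */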
+ public static List<RMNode> newNodes(int racks, int nodesPerRack,
+ Resource perNode) {
+ List<RMNode> list = Lists.newArrayList();
+ for (int i = 0; i < racks; ++i) {
+ for (int j = 0; j < nodesPerRack; ++j) {
+ list.add(newNodeInfo(i, perNode));
+ }
+ }
+ return list;
+ }
+
+ public static NodeId newNodeID(String host, int port) {
+ NodeId nid = recordFactory.newRecordInstance(NodeId.class);
+ nid.setHost(host);
+ nid.setPort(port);
+ return nid;
+ }
+
+ public static Resource newResource(int mem) {
+ Resource rs = recordFactory.newRecordInstance(Resource.class);
+ rs.setMemory(mem);
+ return rs;
+ }
+
+ public static Resource newUsedResource(Resource total) {
+ Resource rs = recordFactory.newRecordInstance(Resource.class);
+ rs.setMemory((int)(Math.random() * total.getMemory()));
+ return rs;
+ }
+
+ public static Resource newAvailResource(Resource total, Resource used) {
+ Resource rs = recordFactory.newRecordInstance(Resource.class);
+ rs.setMemory(total.getMemory() - used.getMemory());
+ return rs;
+ }
+
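+  /**
+   * Creates a mock RMNode on the given rack. Note that the node's used
+   * resource is randomized (anywhere from 0 to its capability), so tests
+   * relying on exact usage should compute it from the returned node.
+   */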
+ public static RMNode newNodeInfo(int rack, final Resource perNode) {
+ final String rackName = "rack"+ rack;
+ final int nid = NODE_ID++;
+ final String hostName = "host"+ nid;
+ final int port = 123;
+ final NodeId nodeID = newNodeID(hostName, port);
+ final String httpAddress = "localhost:0";
+ final NodeHealthStatus nodeHealthStatus =
+ recordFactory.newRecordInstance(NodeHealthStatus.class);
+ final Resource used = newUsedResource(perNode);
+ final Resource avail = newAvailResource(perNode, used);
+ return new RMNode() {
+ @Override
+ public NodeId getNodeID() {
+ return nodeID;
+ }
+
+ @Override
+ public String getNodeAddress() {
+ return hostName;
+ }
+
+ @Override
+ public String getHttpAddress() {
+ return httpAddress;
+ }
+
+ @Override
+ public Resource getTotalCapability() {
+ return perNode;
+ }
+
+ @Override
+ public String getRackName() {
+ return rackName;
+ }
+
+ @Override
+ public Node getNode() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public NodeHealthStatus getNodeHealthStatus() {
+ return nodeHealthStatus;
+ }
+
+ @Override
+ public int getCommandPort() {
+ return nid;
+ }
+
+ @Override
+ public int getHttpPort() {
+        // stub: the http port is not tracked by these mock nodes
+ return 0;
+ }
+
+ @Override
+ public String getHostName() {
+ return hostName;
+ }
+
+ @Override
+ public RMNodeState getState() {
+        // stub: node state is not needed by the current tests
+ return null;
+ }
+
+ @Override
+ public List<ApplicationId> pullAppsToCleanup() {
+        // stub: application cleanup is not exercised here
+ return null;
+ }
+
+ @Override
+ public List<ContainerId> pullContainersToCleanUp() {
+        // stub: container cleanup is not exercised here
+ return null;
+ }
+
+ @Override
+ public HeartbeatResponse getLastHeartBeatResponse() {
+        // stub: heartbeat responses are not recorded
+ return null;
+ }
+ };
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
new file mode 100644
index 0000000..03d88fd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -0,0 +1,193 @@
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+
+
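+/**
+ * Mock ResourceManager that runs the real RM services in-process but
+ * disables all their RPC servers and the webapp, so tests can drive the RM
+ * through direct method calls. A typical end-to-end flow, as used by the
+ * tests in this package:
+ *
+ *   MockRM rm = new MockRM();
+ *   rm.start();
+ *   MockNM nm = rm.registerNode("h1:1234", 5120);
+ *   RMApp app = rm.submitApp(2048);
+ *   nm.nodeHeartbeat(true); // trigger scheduling of the AM container
+ *   MockAM am = rm.sendAMLaunched(app.getCurrentAppAttempt().getAppAttemptId());
+ *   am.registerAppAttempt();
+ *   ...
+ *   rm.stop();
+ */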
+public class MockRM extends ResourceManager {
+
+ public MockRM() {
+ this(new Configuration());
+ }
+
+ public MockRM(Configuration conf) {
+ super(StoreFactory.getStore(conf));
+ init(conf);
+ Logger rootLogger = LogManager.getRootLogger();
+ rootLogger.setLevel(Level.DEBUG);
+ }
+
+ public void waitForState(ApplicationId appId, RMAppState finalState)
+ throws Exception {
+ RMApp app = getRMContext().getRMApps().get(appId);
+ int timeoutSecs = 0;
+ while (!finalState.equals(app.getState()) &&
+ timeoutSecs++ < 20) {
+ System.out.println("App State is : " + app.getState() +
+ " Waiting for state : " + finalState);
+ Thread.sleep(500);
+ }
+ System.out.println("App State is : " + app.getState());
+ Assert.assertEquals("App state is not correct (timedout)",
+ finalState, app.getState());
+ }
+
+  // Client-side helper: calls ClientRMService directly (its RPC server is disabled below)
+ public RMApp submitApp(int masterMemory) throws Exception {
+ ClientRMProtocol client = getClientRMService();
+ GetNewApplicationIdResponse resp = client.getNewApplicationId(Records.newRecord(GetNewApplicationIdRequest.class));
+ ApplicationId appId = resp.getApplicationId();
+
+ SubmitApplicationRequest req = Records.newRecord(SubmitApplicationRequest.class);
+ ApplicationSubmissionContext sub = Records.newRecord(ApplicationSubmissionContext.class);
+ sub.setApplicationId(appId);
+ sub.setApplicationName("");
+ sub.setUser("");
+ Resource capability = Records.newRecord(Resource.class);
+ capability.setMemory(masterMemory);
+ sub.setMasterCapability(capability);
+ req.setApplicationSubmissionContext(sub);
+
+ client.submitApplication(req);
+ waitForState(appId, RMAppState.ACCEPTED);
+ return getRMContext().getRMApps().get(appId);
+ }
+
+ public MockNM registerNode(String nodeIdStr, int memory) throws Exception {
+ MockNM nm = new MockNM(nodeIdStr, memory, getResourceTrackerService());
+ nm.registerNode();
+ return nm;
+ }
+
+ public void killApp(ApplicationId appId) throws Exception {
+ ClientRMProtocol client = getClientRMService();
+ FinishApplicationRequest req = Records.newRecord(FinishApplicationRequest.class);
+ req.setApplicationId(appId);
+ client.finishApplication(req);
+ }
+
+  // Simulates the AMLauncher notifying the RM that an AM was launched
+ public MockAM sendAMLaunched(ApplicationAttemptId appAttemptId) throws Exception {
+ MockAM am = new MockAM(getRMContext(), masterService, appAttemptId);
+ am.waitForState(RMAppAttemptState.ALLOCATED);
+ getRMContext().getDispatcher().getEventHandler().handle(
+ new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.LAUNCHED));
+ return am;
+ }
+
+ public void sendAMLaunchFailed(ApplicationAttemptId appAttemptId) throws Exception {
+ MockAM am = new MockAM(getRMContext(), masterService, appAttemptId);
+ am.waitForState(RMAppAttemptState.ALLOCATED);
+ getRMContext().getDispatcher().getEventHandler().handle(
+ new RMAppAttemptLaunchFailedEvent(appAttemptId, "Failed"));
+ }
+
+ @Override
+ protected ClientRMService createClientRMService() {
+ return new ClientRMService(getRMContext(),
+ clientToAMSecretManager, getResourceScheduler(), masterService) {
+ @Override
+ public void start() {
+ //override to not start rpc handler
+ }
+ @Override
+ public void stop() {
+ // don't do anything
+ }
+ };
+ }
+
+ @Override
+ protected ResourceTrackerService createResourceTrackerService() {
+ return new ResourceTrackerService(getRMContext(), nodesListManager,
+ this.nmLivelinessMonitor, this.containerTokenSecretManager){
+ @Override
+ public void start() {
+ //override to not start rpc handler
+ }
+ @Override
+ public void stop() {
+ // don't do anything
+ }
+ };
+ }
+
+ @Override
+ protected ApplicationMasterService createApplicationMasterService() {
+ return new ApplicationMasterService(getRMContext(),
+ this.appTokenSecretManager, scheduler) {
+ @Override
+ public void start() {
+ //override to not start rpc handler
+ }
+ @Override
+ public void stop() {
+ // don't do anything
+ }
+ };
+ }
+
+ @Override
+ protected ApplicationMasterLauncher createAMLauncher() {
+ return new ApplicationMasterLauncher(
+ this.appTokenSecretManager, this.clientToAMSecretManager,
+ getRMContext()) {
+ @Override
+ public void start() {
+ //override to not start rpc handler
+ }
+ @Override
+ public void handle(AMLauncherEvent appEvent) {
+ //don't do anything
+ }
+ @Override
+ public void stop() {
+ // don't do anything
+ }
+ };
+ }
+
+  @Override
+  protected AdminService createAdminService() {
+ return new AdminService(getConfig(), scheduler, getRMContext(),
+ this.nodesListManager){
+ @Override
+ public void start() {
+ //override to not start rpc handler
+ }
+ @Override
+ public void stop() {
+ // don't do anything
+ }
+ };
+ }
+
+ @Override
+ protected void startWepApp() {
+ //override to disable webapp
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
new file mode 100644
index 0000000..23871a7
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
@@ -0,0 +1,270 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+
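+/**
+ * In-process NodeManager stand-in implementing ContainerManager: it
+ * registers itself with the ResourceTrackerService on construction and
+ * tracks per-application containers plus used/available resources so tests
+ * can assert against the scheduler's view (see checkResourceUsage).
+ */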
+@Private
+public class NodeManager implements ContainerManager {
+ private static final Log LOG = LogFactory.getLog(NodeManager.class);
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ final private String containerManagerAddress;
+ final private String nodeHttpAddress;
+ final private String rackName;
+ final private NodeId nodeId;
+ final private Resource capability;
+ Resource available = recordFactory.newRecordInstance(Resource.class);
+ Resource used = recordFactory.newRecordInstance(Resource.class);
+
+ final ResourceTrackerService resourceTrackerService;
+ final SchedulerNode schedulerNode;
+ final Map<ApplicationId, List<Container>> containers =
+ new HashMap<ApplicationId, List<Container>>();
+
+ public NodeManager(String hostName, int containerManagerPort, int httpPort,
+ String rackName, int memory,
+ ResourceTrackerService resourceTrackerService, RMContext rmContext)
+ throws IOException {
+ this.containerManagerAddress = hostName + ":" + containerManagerPort;
+ this.nodeHttpAddress = hostName + ":" + httpPort;
+ this.rackName = rackName;
+ this.resourceTrackerService = resourceTrackerService;
+ this.capability = Resources.createResource(memory);
+ Resources.addTo(available, capability);
+
+ this.nodeId = recordFactory.newRecordInstance(NodeId.class);
+ this.nodeId.setHost(hostName);
+ this.nodeId.setPort(containerManagerPort);
+ RegisterNodeManagerRequest request = recordFactory
+ .newRecordInstance(RegisterNodeManagerRequest.class);
+ request.setHttpPort(httpPort);
+ request.setNodeId(this.nodeId);
+ request.setResource(capability);
+ resourceTrackerService.registerNodeManager(request)
+ .getRegistrationResponse();
+ this.schedulerNode = new SchedulerNode(rmContext.getRMNodes().get(
+ this.nodeId));
+
+ // Sanity check
+ Assert.assertEquals(memory,
+ schedulerNode.getAvailableResource().getMemory());
+ }
+
+ public String getHostName() {
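+    // note: returns the container-manager address, i.e. "host:port"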
+ return containerManagerAddress;
+ }
+
+ public String getRackName() {
+ return rackName;
+ }
+
+ public NodeId getNodeId() {
+ return nodeId;
+ }
+
+ public Resource getCapability() {
+ return capability;
+ }
+
+ public Resource getAvailable() {
+ return available;
+ }
+
+ public Resource getUsed() {
+ return used;
+ }
+
+ int responseID = 0;
+
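+  /**
+   * Sends a node heartbeat carrying the current container statuses and
+   * records the response id for the next heartbeat.
+   */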
+ public void heartbeat() throws IOException {
+    NodeStatus nodeStatus = createNodeStatus(nodeId, containers);
+ nodeStatus.setResponseId(responseID);
+ NodeHeartbeatRequest request = recordFactory
+ .newRecordInstance(NodeHeartbeatRequest.class);
+ request.setNodeStatus(nodeStatus);
+ HeartbeatResponse response = resourceTrackerService
+ .nodeHeartbeat(request).getHeartbeatResponse();
+ responseID = response.getResponseId();
+ }
+
+ @Override
+ synchronized public StartContainerResponse startContainer(StartContainerRequest request) throws YarnRemoteException {
+ ContainerLaunchContext containerLaunchContext = request.getContainerLaunchContext();
+
+ ApplicationId applicationId = containerLaunchContext.getContainerId()
+ .getAppId();
+
+ List<Container> applicationContainers = containers.get(applicationId);
+ if (applicationContainers == null) {
+ applicationContainers = new ArrayList<Container>();
+ containers.put(applicationId, applicationContainers);
+ }
+
+ // Sanity check
+ for (Container container : applicationContainers) {
+ if (container.getId().compareTo(containerLaunchContext.getContainerId()) == 0) {
+ throw new IllegalStateException(
+ "Container " + containerLaunchContext.getContainerId() +
+ " already setup on node " + containerManagerAddress);
+ }
+ }
+
+ Container container =
+ BuilderUtils.newContainer(containerLaunchContext.getContainerId(),
+ this.nodeId, nodeHttpAddress,
+ containerLaunchContext.getResource());
+
+ applicationContainers.add(container);
+
+ Resources.subtractFrom(available, containerLaunchContext.getResource());
+ Resources.addTo(used, containerLaunchContext.getResource());
+
+ LOG.info("DEBUG --- startContainer:" +
+ " node=" + containerManagerAddress +
+ " application=" + applicationId +
+ " container=" + container +
+ " available=" + available +
+ " used=" + used);
+
+ StartContainerResponse response = recordFactory.newRecordInstance(StartContainerResponse.class);
+ return response;
+ }
+
+ synchronized public void checkResourceUsage() {
+ LOG.info("Checking resource usage for " + containerManagerAddress);
+ Assert.assertEquals(available.getMemory(),
+ schedulerNode.getAvailableResource().getMemory());
+ Assert.assertEquals(used.getMemory(),
+ schedulerNode.getUsedResource().getMemory());
+ }
+
+ @Override
+ synchronized public StopContainerResponse stopContainer(StopContainerRequest request)
+ throws YarnRemoteException {
+ ContainerId containerID = request.getContainerId();
+    ApplicationId applicationId = containerID.getAppId();
+
+ // Mark the container as COMPLETE
+ List<Container> applicationContainers = containers.get(applicationId);
+ for (Container c : applicationContainers) {
+ if (c.getId().compareTo(containerID) == 0) {
+ c.setState(ContainerState.COMPLETE);
+ }
+ }
+
+ // Send a heartbeat
+ try {
+ heartbeat();
+ } catch (IOException ioe) {
+ throw RPCUtil.getRemoteException(ioe);
+ }
+
+ // Remove container and update status
+ int ctr = 0;
+ Container container = null;
+ for (Iterator<Container> i=applicationContainers.iterator(); i.hasNext();) {
+ container = i.next();
+ if (container.getId().compareTo(containerID) == 0) {
+ i.remove();
+ ++ctr;
+ }
+ }
+
+ if (ctr != 1) {
+ throw new IllegalStateException("Container " + containerID +
+ " stopped " + ctr + " times!");
+ }
+
+ Resources.addTo(available, container.getResource());
+ Resources.subtractFrom(used, container.getResource());
+
+ LOG.info("DEBUG --- stopContainer:" +
+ " node=" + containerManagerAddress +
+ " application=" + applicationId +
+ " container=" + containerID +
+ " available=" + available +
+ " used=" + used);
+
+ StopContainerResponse response = recordFactory.newRecordInstance(StopContainerResponse.class);
+ return response;
+ }
+
+ @Override
+ synchronized public GetContainerStatusResponse getContainerStatus(GetContainerStatusRequest request) throws YarnRemoteException {
+    // stub: these tests never query container status
+    GetContainerStatusResponse response = recordFactory.newRecordInstance(GetContainerStatusResponse.class);
+ return response;
+ }
+
+ public static org.apache.hadoop.yarn.server.api.records.NodeStatus createNodeStatus(
+ NodeId nodeId, Map<ApplicationId, List<Container>> containers) {
+ RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus = recordFactory.newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
+ nodeStatus.setNodeId(nodeId);
+ nodeStatus.addAllContainers(containers);
+ NodeHealthStatus nodeHealthStatus =
+ recordFactory.newRecordInstance(NodeHealthStatus.class);
+ nodeHealthStatus.setIsNodeHealthy(true);
+ nodeStatus.setNodeHealthStatus(nodeHealthStatus);
+ return nodeStatus;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Task.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Task.java
new file mode 100644
index 0000000..0fac0cd
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Task.java
@@ -0,0 +1,145 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
+
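+/**
+ * A mock task with locality preferences. State moves from PENDING to
+ * RUNNING on start() and to COMPLETE on stop(); canSchedule() checks a
+ * proposed node or rack against the task's preferred hosts and racks.
+ */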
+public class Task {
+ private static final Log LOG = LogFactory.getLog(Task.class);
+
+  public enum State {PENDING, ALLOCATED, RUNNING, COMPLETE}
+
+ final private ApplicationId applicationId;
+ final private int taskId;
+ final private Priority priority;
+
+ final private Set<String> hosts = new HashSet<String>();
+ final private Set<String> racks = new HashSet<String>();
+
+ private ContainerId containerId;
+ private org.apache.hadoop.yarn.server.resourcemanager.NodeManager nodeManager;
+
+ private State state;
+
+ public Task(Application application, Priority priority, String[] hosts) {
+ this.applicationId = application.getApplicationId();
+ this.priority = priority;
+
+ taskId = application.getNextTaskId();
+ state = State.PENDING;
+
+ // Special case: Don't care about locality
+ if (!(hosts.length == 1 &&
+ hosts[0].equals(
+ org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode.ANY))) {
+ for (String host : hosts) {
+ this.hosts.add(host);
+ this.racks.add(Application.resolve(host));
+ }
+ }
+ LOG.info("Task " + taskId + " added to application " + this.applicationId +
+ " with " + this.hosts.size() + " hosts, " + racks.size() + " racks");
+ }
+
+ public int getTaskId() {
+ return taskId;
+ }
+
+ public Priority getPriority() {
+ return priority;
+ }
+
+ public org.apache.hadoop.yarn.server.resourcemanager.NodeManager getNodeManager() {
+ return nodeManager;
+ }
+
+ public ContainerId getContainerId() {
+ return containerId;
+ }
+
+ public ApplicationId getApplicationID() {
+ return applicationId;
+ }
+
+ public String[] getHosts() {
+ return hosts.toArray(new String[hosts.size()]);
+ }
+
+ public String[] getRacks() {
+ return racks.toArray(new String[racks.size()]);
+ }
+
+ public boolean canSchedule(NodeType type, String hostName) {
+ if (type == NodeType.NODE_LOCAL) {
+ return hosts.contains(hostName);
+ } else if (type == NodeType.RACK_LOCAL) {
+ return racks.contains(Application.resolve(hostName));
+ }
+
+ return true;
+ }
+
+ public void start(NodeManager nodeManager, ContainerId containerId) {
+ this.nodeManager = nodeManager;
+ this.containerId = containerId;
+ setState(State.RUNNING);
+ }
+
+ public void stop() {
+ if (getState() != State.RUNNING) {
+ throw new IllegalStateException("Trying to stop a non-running task: " +
+ getTaskId() + " of application " + getApplicationID());
+ }
+ this.nodeManager = null;
+ this.containerId = null;
+ setState(State.COMPLETE);
+ }
+
+ public State getState() {
+ return state;
+ }
+
+ private void setState(State state) {
+ this.state = state;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof Task) {
+ return ((Task)obj).taskId == this.taskId;
+ }
+ return super.equals(obj);
+ }
+
+ @Override
+ public int hashCode() {
+ return taskId;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
new file mode 100644
index 0000000..ae8ef3b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
@@ -0,0 +1,93 @@
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.junit.Test;
+
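+/**
+ * Verifies that once an application finishes, the RM tells the node (via
+ * heartbeat responses) which containers and applications to clean up.
+ */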
+public class TestApplicationCleanup {
+
+  private static final Log LOG = LogFactory.getLog(TestApplicationCleanup.class);
+
+ @Test
+ public void testAppCleanup() throws Exception {
+ Logger rootLogger = LogManager.getRootLogger();
+ rootLogger.setLevel(Level.DEBUG);
+ MockRM rm = new MockRM();
+ rm.start();
+
+ MockNM nm1 = rm.registerNode("h1:1234", 5000);
+
+ RMApp app = rm.submitApp(2000);
+
+ //kick the scheduling
+ nm1.nodeHeartbeat(true);
+
+ RMAppAttempt attempt = app.getCurrentAppAttempt();
+ MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
+ am.registerAppAttempt();
+
+ //request for containers
+ int request = 2;
+ am.allocate("h1" , 1000, request,
+ new ArrayList<ContainerId>());
+
+ //kick the scheduler
+ nm1.nodeHeartbeat(true);
+ List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
+ new ArrayList<ContainerId>()).getNewContainerList();
+ int contReceived = conts.size();
+ while (contReceived < request) {
+ conts = am.allocate(new ArrayList<ResourceRequest>(),
+ new ArrayList<ContainerId>()).getNewContainerList();
+ contReceived += conts.size();
+ Log.info("Got " + contReceived + " containers. Waiting to get " + request);
+ Thread.sleep(2000);
+ }
+    Assert.assertEquals(request, contReceived);
+
+ am.unregisterAppAttempt();
+ am.waitForState(RMAppAttemptState.FINISHED);
+
+ int cleanedConts = 0;
+ int cleanedApps = 0;
+ List<ContainerId> contsToClean = null;
+ List<ApplicationId> apps = null;
+
+    // Currently only containers are cleaned up via these heartbeat responses;
+    // the AM container is cleaned up by the container launcher.
+ while (cleanedConts < 2 || cleanedApps < 1) {
+ HeartbeatResponse resp = nm1.nodeHeartbeat(true);
+ contsToClean = resp.getContainersToCleanupList();
+ apps = resp.getApplicationsToCleanupList();
+ Log.info("Waiting to get cleanup events.. cleanedConts: "
+ + cleanedConts + " cleanedApps: " + cleanedApps);
+ cleanedConts += contsToClean.size();
+ cleanedApps += apps.size();
+ Thread.sleep(1000);
+ }
+
+ Assert.assertEquals(1, apps.size());
+ Assert.assertEquals(app.getApplicationId(), apps.get(0));
+
+ rm.stop();
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestApplicationCleanup t = new TestApplicationCleanup();
+ t.testAppCleanup();
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
new file mode 100644
index 0000000..0fa897a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
@@ -0,0 +1,151 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
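+/**
+ * End-to-end FIFO scheduler test: two applications on two nodes, checking
+ * container placement, used/available memory accounting, and the release
+ * of resources when a container completes.
+ */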
+public class TestFifoScheduler {
+ private static final Log LOG = LogFactory.getLog(TestFifoScheduler.class);
+
+ private ResourceManager resourceManager = null;
+
+ @Before
+ public void setUp() throws Exception {
+ Store store = StoreFactory.getStore(new Configuration());
+ resourceManager = new ResourceManager(store);
+ resourceManager.init(new Configuration());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ @Test
+ public void test() throws Exception {
+ Logger rootLogger = LogManager.getRootLogger();
+ rootLogger.setLevel(Level.DEBUG);
+ MockRM rm = new MockRM();
+ rm.start();
+ int GB = 1024;
+ MockNM nm1 = rm.registerNode("h1:1234", 6 * GB);
+ MockNM nm2 = rm.registerNode("h2:5678", 4 * GB);
+
+ RMApp app1 = rm.submitApp(2048);
+ // kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
+ nm1.nodeHeartbeat(true);
+ RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
+ MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
+ am1.registerAppAttempt();
+ Assert.assertEquals(2 * GB, rm.getResourceScheduler().getUsedResource(
+ nm1.getNodeId()).getMemory());
+
+ RMApp app2 = rm.submitApp(2048);
+ // kick the scheduling, 2GB given to AM, remaining 2 GB on nm2
+ nm2.nodeHeartbeat(true);
+ RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
+ MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
+ am2.registerAppAttempt();
+ Assert.assertEquals(2 * GB, rm.getResourceScheduler().getUsedResource(
+ nm2.getNodeId()).getMemory());
+
+ // add request for containers
+ am1.addRequests(new String[] { "h1", "h2" }, GB, 1, 1);
+ AMResponse am1Response = am1.schedule(); // send the request
+ // add request for containers
+ am2.addRequests(new String[] { "h1", "h2" }, 3 * GB, 0, 1);
+ AMResponse am2Response = am2.schedule(); // send the request
+
+ // kick the scheduler, 1 GB and 3 GB given to AM1 and AM2, remaining 0
+ nm1.nodeHeartbeat(true);
+ while (am1Response.getNewContainerCount() < 1) {
+ LOG.info("Waiting for containers to be created for app 1...");
+ Thread.sleep(1000);
+ am1Response = am1.schedule();
+ }
+ while (am2Response.getNewContainerCount() < 1) {
+ LOG.info("Waiting for containers to be created for app 2...");
+ Thread.sleep(1000);
+ am2Response = am2.schedule();
+ }
+    // kick the scheduler; nothing new is allocated, 2 GB remain on nm2
+ nm2.nodeHeartbeat(true);
+
+ List<Container> allocated1 = am1Response.getNewContainerList();
+ Assert.assertEquals(1, allocated1.size());
+ Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
+ Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
+
+ List<Container> allocated2 = am2Response.getNewContainerList();
+ Assert.assertEquals(1, allocated2.size());
+ Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory());
+ Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());
+
+ Assert.assertEquals(0, rm.getResourceScheduler().getAvailableResource(
+ nm1.getNodeId()).getMemory());
+ Assert.assertEquals(2 * GB, rm.getResourceScheduler()
+ .getAvailableResource(nm2.getNodeId()).getMemory());
+
+ Assert.assertEquals(6 * GB, rm.getResourceScheduler().getUsedResource(
+ nm1.getNodeId()).getMemory());
+ Assert.assertEquals(2 * GB, rm.getResourceScheduler().getUsedResource(
+ nm2.getNodeId()).getMemory());
+
+ Container c1 = allocated1.get(0);
+ Assert.assertEquals(GB, c1.getResource().getMemory());
+ c1.setState(ContainerState.COMPLETE);
+ nm1.containerStatus(c1);
+ int waitCount = 0;
+ while (attempt1.getJustFinishedContainers().size() < 1
+ && waitCount++ != 20) {
+ LOG.info("Waiting for containers to be finished for app 1... Tried "
+ + waitCount + " times already..");
+ Thread.sleep(1000);
+ }
+ Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
+ Assert.assertEquals(1, am1.schedule().getFinishedContainerList().size());
+ Assert.assertEquals(5 * GB, rm.getResourceScheduler().getUsedResource(
+ nm1.getNodeId()).getMemory());
+
+ rm.stop();
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestFifoScheduler t = new TestFifoScheduler();
+ t.test();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
new file mode 100644
index 0000000..bc506e6
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
@@ -0,0 +1,107 @@
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.junit.Test;
+
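+/**
+ * Basic RM lifecycle tests: an application that uses no containers, and an
+ * application whose container requests spill over from one node to a second.
+ */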
+public class TestRM {
+
+ private static final Log LOG = LogFactory.getLog(TestRM.class);
+
+ @Test
+ public void testAppWithNoContainers() throws Exception {
+ Logger rootLogger = LogManager.getRootLogger();
+ rootLogger.setLevel(Level.DEBUG);
+ MockRM rm = new MockRM();
+ rm.start();
+ MockNM nm1 = rm.registerNode("h1:1234", 5120);
+
+ RMApp app = rm.submitApp(2000);
+
+ //kick the scheduling
+ nm1.nodeHeartbeat(true);
+
+ RMAppAttempt attempt = app.getCurrentAppAttempt();
+ MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
+ am.registerAppAttempt();
+ am.unregisterAppAttempt();
+ am.waitForState(RMAppAttemptState.FINISHED);
+ rm.stop();
+ }
+
+ @Test
+ public void testAppOnMultiNode() throws Exception {
+ Logger rootLogger = LogManager.getRootLogger();
+ rootLogger.setLevel(Level.DEBUG);
+ MockRM rm = new MockRM();
+ rm.start();
+ MockNM nm1 = rm.registerNode("h1:1234", 5120);
+ MockNM nm2 = rm.registerNode("h2:5678", 10240);
+
+ RMApp app = rm.submitApp(2000);
+
+ //kick the scheduling
+ nm1.nodeHeartbeat(true);
+
+ RMAppAttempt attempt = app.getCurrentAppAttempt();
+ MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
+ am.registerAppAttempt();
+
+ //request for containers
+ int request = 13;
+ am.allocate("h1" , 1000, request, new ArrayList<ContainerId>());
+
+ //kick the scheduler
+ nm1.nodeHeartbeat(true);
+ List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
+ new ArrayList<ContainerId>()).getNewContainerList();
+ int contReceived = conts.size();
+ while (contReceived < 3) {//only 3 containers are available on node1
+ conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
+ new ArrayList<ContainerId>()).getNewContainerList());
+ contReceived = conts.size();
+ LOG.info("Got " + contReceived + " containers. Waiting to get " + 3);
+ Thread.sleep(2000);
+ }
+ Assert.assertEquals(3, conts.size());
+
+ //send node2 heartbeat
+ nm2.nodeHeartbeat(true);
+ conts = am.allocate(new ArrayList<ResourceRequest>(),
+ new ArrayList<ContainerId>()).getNewContainerList();
+ contReceived = conts.size();
+ while (contReceived < 10) {
+ conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
+ new ArrayList<ContainerId>()).getNewContainerList());
+ contReceived = conts.size();
+ LOG.info("Got " + contReceived + " containers. Waiting to get " + 10);
+ Thread.sleep(2000);
+ }
+ Assert.assertEquals(10, conts.size());
+
+ am.unregisterAppAttempt();
+ am.waitForState(RMAppAttemptState.FINISHED);
+
+ rm.stop();
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestRM t = new TestRM();
+ t.testAppWithNoContainers();
+ t.testAppOnMultiNode();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
new file mode 100644
index 0000000..60a227b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -0,0 +1,164 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestResourceManager {
+ private static final Log LOG = LogFactory.getLog(TestResourceManager.class);
+
+ private ResourceManager resourceManager = null;
+
+ @Before
+ public void setUp() throws Exception {
+ Configuration conf = new Configuration();
+ Store store = StoreFactory.getStore(conf);
+ resourceManager = new ResourceManager(store);
+ resourceManager.init(conf);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
+ registerNode(String hostName, int containerManagerPort, int httpPort,
+ String rackName, int memory) throws IOException {
+ return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
+ hostName, containerManagerPort, httpPort, rackName, memory,
+ resourceManager.getResourceTrackerService(), resourceManager
+ .getRMContext());
+ }
+
+// @Test
+ public void testResourceAllocation() throws IOException {
+ LOG.info("--- START: testResourceAllocation ---");
+
+ final int memory = 4 * 1024;
+
+ // Register node1
+ String host1 = "host1";
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm1 =
+ registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, memory);
+ nm1.heartbeat();
+
+ // Register node2
+ String host2 = "host2";
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm2 =
+ registerNode(host2, 1234, 2345, NetworkTopology.DEFAULT_RACK, memory/2);
+ nm2.heartbeat();
+
+ // Submit an application
+ Application application = new Application("user1", resourceManager);
+ application.submit();
+
+ application.addNodeManager(host1, 1234, nm1);
+ application.addNodeManager(host2, 1234, nm2);
+
+ // Application resource requirements
+ final int memory1 = 1024;
+ Resource capability1 = Resources.createResource(memory1);
+ Priority priority1 =
+ org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(1);
+ application.addResourceRequestSpec(priority1, capability1);
+
+ Task t1 = new Task(application, priority1, new String[] {host1, host2});
+ application.addTask(t1);
+
+ final int memory2 = 2048;
+ Resource capability2 = Resources.createResource(memory2);
+ Priority priority0 =
+ org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(0); // higher
+ application.addResourceRequestSpec(priority0, capability2);
+
+ // Send resource requests to the scheduler
+ application.schedule();
+
+ // Send a heartbeat to kick the tires on the Scheduler
+ nm1.heartbeat();
+
+ // Get allocations from the scheduler
+ application.schedule();
+
+ nm1.heartbeat();
+ checkResourceUsage(nm1, nm2);
+
+ LOG.info("Adding new tasks...");
+
+ Task t2 = new Task(application, priority1, new String[] {host1, host2});
+ application.addTask(t2);
+
+ Task t3 = new Task(application, priority0, new String[] {RMNode.ANY});
+ application.addTask(t3);
+
+ // Send resource requests to the scheduler
+ application.schedule();
+ checkResourceUsage(nm1, nm2);
+
+ // Send a heartbeat to kick the tires on the Scheduler
+ LOG.info("Sending hb from host2");
+ nm2.heartbeat();
+
+ LOG.info("Sending hb from host1");
+ nm1.heartbeat();
+
+ // Get allocations from the scheduler
+ LOG.info("Trying to allocate...");
+ application.schedule();
+
+ nm1.heartbeat();
+ nm2.heartbeat();
+ checkResourceUsage(nm1, nm2);
+
+ // Complete tasks
+ LOG.info("Finishing up tasks...");
+ application.finishTask(t1);
+ application.finishTask(t2);
+ application.finishTask(t3);
+
+ // Send heartbeat
+ nm1.heartbeat();
+ nm2.heartbeat();
+ checkResourceUsage(nm1, nm2);
+
+ LOG.info("--- END: testResourceAllocation ---");
+ }
+
+ private void checkResourceUsage(
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager... nodes ) {
+ for (org.apache.hadoop.yarn.server.resourcemanager.NodeManager nodeManager : nodes) {
+ nodeManager.checkResourceUsage();
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
new file mode 100644
index 0000000..95690f0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
+
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.yarn.MockApps;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.Records;
+
+import com.google.common.collect.Lists;
+
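+/**
+ * Base mocks for applications-manager tests: AppMasterBase and
+ * ApplicationBase implement their interfaces by throwing
+ * UnsupportedOperationException, so tests override only the methods they
+ * actually need.
+ */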
+@InterfaceAudience.Private
+public abstract class MockAsm extends MockApps {
+ static final int DT = 1000000; // ms
+
+ public static class AppMasterBase implements ApplicationMaster {
+ @Override
+ public ApplicationId getApplicationId() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public String getHost() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public int getRpcPort() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public String getTrackingUrl() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public ApplicationStatus getStatus() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public ApplicationState getState() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public String getClientToken() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public int getAMFailCount() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public int getContainerCount() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public String getDiagnostics() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setApplicationId(ApplicationId appId) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setHost(String host) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setRpcPort(int rpcPort) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setTrackingUrl(String url) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setStatus(ApplicationStatus status) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setState(ApplicationState state) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setClientToken(String clientToken) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setAMFailCount(int amFailCount) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setContainerCount(int containerCount) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public void setDiagnostics(String diagnostics) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ }
+
+ public static class ApplicationBase implements RMApp {
+ @Override
+ public String getUser() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public String getName() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public String getQueue() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public long getStartTime() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ @Override
+ public long getFinishTime() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public StringBuilder getDiagnostics() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public ApplicationId getApplicationId() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public RMAppAttempt getCurrentAppAttempt() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public ApplicationStore getApplicationStore() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public float getProgress() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public RMAppAttempt getRMAppAttempt(ApplicationAttemptId appAttemptId) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public RMAppState getState() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public String getTrackingUrl() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public ApplicationReport createAndGetApplicationReport() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ @Override
+ public void handle(RMAppEvent event) {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+ }
+
+ public static RMApp newApplication(int i) {
+ final ApplicationId id = newAppID(i);
+ final Container masterContainer = Records.newRecord(Container.class);
+ ContainerId containerId = Records.newRecord(ContainerId.class);
+ containerId.setAppId(id);
+ masterContainer.setId(containerId);
+ masterContainer.setNodeHttpAddress("node:port");
+ final String user = newUserName();
+ final String name = newAppName();
+ final String queue = newQueue();
+ final long start = System.currentTimeMillis() - (int)(Math.random()*DT);
+ final long finish = Math.random() < 0.5 ? 0 :
+ System.currentTimeMillis() + (int)(Math.random()*DT);
+ return new ApplicationBase() {
+ @Override
+ public ApplicationId getApplicationId() {
+ return id;
+ }
+ @Override
+ public String getUser() {
+ return user;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public String getQueue() {
+ return queue;
+ }
+
+ @Override
+ public long getStartTime() {
+ return start;
+ }
+
+ @Override
+ public long getFinishTime() {
+ return finish;
+ }
+ @Override
+ public String getTrackingUrl() {
+ return null;
+ }
+ @Override
+ public RMAppState getState() {
+ return RMAppState.RUNNING;
+ }
+ @Override
+ public StringBuilder getDiagnostics() {
+ return new StringBuilder();
+ }
+ @Override
+ public float getProgress() {
+ return (float)Math.random();
+ }
+ };
+ }
+
+ public static List<RMApp> newApplications(int n) {
+ List<RMApp> list = Lists.newArrayList();
+ for (int i = 0; i < n; ++i) {
+ list.add(newApplication(i));
+ }
+ return list;
+ }
+}
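
MockAsm's base classes throw UnsupportedOperationException from every method, so a test overrides only the accessors it actually needs and any accidental call to unstubbed state fails fast. A minimal usage sketch follows; it is not part of this patch and assumes it sits in the same package as MockAsm:

import java.util.List;

import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;

public class MockAsmUsageSketch {
  public static void main(String[] args) {
    // Each mock application stubs only id, user, name, queue, times, state,
    // diagnostics and progress; every other RMApp method still throws.
    List<RMApp> apps = MockAsm.newApplications(3);
    for (RMApp app : apps) {
      System.out.println(app.getApplicationId() + " user=" + app.getUser()
          + " queue=" + app.getQueue() + " progress=" + app.getProgress());
    }
  }
}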
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
new file mode 100644
index 0000000..f8ec9f47
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/* A test case for the launch failure of an AM. */
+public class TestAMLaunchFailure {
+// private static final Log LOG = LogFactory.getLog(TestAMLaunchFailure.class);
+// private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+// ApplicationsManagerImpl asmImpl;
+// YarnScheduler scheduler = new DummyYarnScheduler();
+// ApplicationTokenSecretManager applicationTokenSecretManager =
+// new ApplicationTokenSecretManager();
+// private ClientRMService clientService;
+//
+// private RMContext context;
+//
+// private static class DummyYarnScheduler implements YarnScheduler {
+// private Container container = recordFactory.newRecordInstance(Container.class);
+//
+// @Override
+// public Allocation allocate(ApplicationId applicationId,
+// List<ResourceRequest> ask, List<Container> release) throws IOException {
+// return new Allocation(Arrays.asList(container), Resources.none());
+// }
+//
+// @Override
+// public QueueInfo getQueueInfo(String queueName,
+// boolean includeChildQueues,
+// boolean recursive) throws IOException {
+// return null;
+// }
+//
+// @Override
+// public List<QueueUserACLInfo> getQueueUserAclInfo() {
+// return null;
+// }
+//
+// @Override
+// public void addApplication(ApplicationId applicationId,
+// ApplicationMaster master, String user, String queue, Priority priority
+// , ApplicationStore appStore)
+// throws IOException {
+// // TODO Auto-generated method stub
+//
+// }
+//
+// @Override
+// public Resource getMaximumResourceCapability() {
+// // TODO Auto-generated method stub
+// return null;
+// }
+//
+// @Override
+// public Resource getMinimumResourceCapability() {
+// // TODO Auto-generated method stub
+// return null;
+// }
+// }
+//
+// private class DummyApplicationTracker implements EventHandler<ASMEvent<ApplicationTrackerEventType>> {
+// public DummyApplicationTracker() {
+// context.getDispatcher().register(ApplicationTrackerEventType.class, this);
+// }
+// @Override
+// public void handle(ASMEvent<ApplicationTrackerEventType> event) {
+// }
+// }
+//
+// public class ExtApplicationsManagerImpl extends ApplicationsManagerImpl {
+//
+// private class DummyApplicationMasterLauncher implements EventHandler<ASMEvent<AMLauncherEventType>> {
+// private AtomicInteger notify = new AtomicInteger();
+// private AppAttempt app;
+//
+// public DummyApplicationMasterLauncher(RMContext context) {
+// context.getDispatcher().register(AMLauncherEventType.class, this);
+// new TestThread().start();
+// }
+// @Override
+// public void handle(ASMEvent<AMLauncherEventType> appEvent) {
+// switch(appEvent.getType()) {
+// case LAUNCH:
+// LOG.info("LAUNCH called ");
+// app = appEvent.getApplication();
+// synchronized (notify) {
+// notify.addAndGet(1);
+// notify.notify();
+// }
+// break;
+// }
+// }
+//
+// private class TestThread extends Thread {
+// public void run() {
+// synchronized(notify) {
+// try {
+// while (notify.get() == 0) {
+// notify.wait();
+// }
+// } catch (InterruptedException e) {
+// e.printStackTrace();
+// }
+// context.getDispatcher().getEventHandler().handle(
+// new ApplicationEvent(ApplicationEventType.LAUNCHED,
+// app.getApplicationID()));
+// }
+// }
+// }
+// }
+//
+// public ExtApplicationsManagerImpl(
+// ApplicationTokenSecretManager applicationTokenSecretManager,
+// YarnScheduler scheduler) {
+// super(applicationTokenSecretManager, scheduler, context);
+// }
+//
+// @Override
+// protected EventHandler<ASMEvent<AMLauncherEventType>> createNewApplicationMasterLauncher(
+// ApplicationTokenSecretManager tokenSecretManager) {
+// return new DummyApplicationMasterLauncher(context);
+// }
+// }
+//
+//
+// @Before
+// public void setUp() {
+// context = new RMContextImpl(new MemStore());
+// Configuration conf = new Configuration();
+//
+// context.getDispatcher().register(ApplicationEventType.class,
+// new ResourceManager.ApplicationEventDispatcher(context));
+//
+// context.getDispatcher().init(conf);
+// context.getDispatcher().start();
+//
+// asmImpl = new ExtApplicationsManagerImpl(applicationTokenSecretManager, scheduler);
+// clientService = new ClientRMService(context, asmImpl
+// .getAmLivelinessMonitor(), asmImpl.getClientToAMSecretManager(),
+// scheduler);
+// clientService.init(conf);
+// new DummyApplicationTracker();
+// conf.setLong(YarnConfiguration.AM_EXPIRY_INTERVAL, 3000L);
+// conf.setInt(RMConfig.AM_MAX_RETRIES, 1);
+// asmImpl.init(conf);
+// asmImpl.start();
+// }
+//
+// @After
+// public void tearDown() {
+// asmImpl.stop();
+// }
+//
+// private ApplicationSubmissionContext createDummyAppContext(ApplicationId appID) {
+// ApplicationSubmissionContext context = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
+// context.setApplicationId(appID);
+// return context;
+// }
+//
+// @Test
+// public void testAMLaunchFailure() throws Exception {
+// ApplicationId appID = clientService.getNewApplicationId();
+// ApplicationSubmissionContext submissionContext = createDummyAppContext(appID);
+// SubmitApplicationRequest request = recordFactory
+// .newRecordInstance(SubmitApplicationRequest.class);
+// request.setApplicationSubmissionContext(submissionContext);
+// clientService.submitApplication(request);
+// AppAttempt application = context.getApplications().get(appID);
+//
+// while (application.getState() != ApplicationState.FAILED) {
+// LOG.info("Waiting for application to go to FAILED state."
+// + " Current state is " + application.getState());
+// Thread.sleep(200);
+// application = context.getApplications().get(appID);
+// }
+// Assert.assertEquals(ApplicationState.FAILED, application.getState());
+// }
+}
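
The disabled test above polls the application in a sleep loop until it reaches FAILED. A small sketch of that polling idiom with an explicit deadline, so a missed transition fails the test instead of hanging it forever (not part of this patch; names are illustrative):

import java.util.concurrent.Callable;

public final class WaitForState {
  private WaitForState() {}

  // Re-evaluate the probe until it returns the expected value or the
  // deadline passes; sleeping a fixed interval between attempts.
  public static <T> void waitFor(Callable<T> probe, T expected,
      long timeoutMs, long intervalMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (expected.equals(probe.call())) {
        return;
      }
      Thread.sleep(intervalMs);
    }
    throw new AssertionError("Timed out waiting for state " + expected);
  }
}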
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCResponseId.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCResponseId.java
new file mode 100644
index 0000000..61d678e
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCResponseId.java
@@ -0,0 +1,99 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.records.AMResponse;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestAMRMRPCResponseId {
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ private MockRM rm;
+ ApplicationMasterService amService = null;
+ private ClientRMService clientService;
+
+ private RMContext context;
+
+ @Before
+ public void setUp() {
+ this.rm = new MockRM();
+ rm.start();
+ this.clientService = rm.getClientRMService();
+ amService = rm.getApplicationMasterService();
+ }
+
+ @After
+ public void tearDown() {
+ if (rm != null) {
+ this.rm.stop();
+ }
+ }
+
+ @Test
+ public void testAMRMResponseId() throws Exception {
+
+ MockNM nm1 = rm.registerNode("h1:1234", 5000);
+
+ RMApp app = rm.submitApp(2000);
+
+ // Trigger the scheduling so the AM gets 'launched'
+ nm1.nodeHeartbeat(true);
+
+ RMAppAttempt attempt = app.getCurrentAppAttempt();
+ MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
+
+ am.registerAppAttempt();
+
+ AllocateRequest allocateRequest = recordFactory.newRecordInstance(AllocateRequest.class);
+ allocateRequest.setApplicationAttemptId(attempt.getAppAttemptId());
+
+ AMResponse response = amService.allocate(allocateRequest).getAMResponse();
+ Assert.assertEquals(1, response.getResponseId());
+ Assert.assertFalse(response.getReboot());
+ allocateRequest.setResponseId(response.getResponseId());
+
+ response = amService.allocate(allocateRequest).getAMResponse();
+ Assert.assertEquals(2, response.getResponseId());
+ /* try resending */
+ response = amService.allocate(allocateRequest).getAMResponse();
+ Assert.assertEquals(2, response.getResponseId());
+
+ /* try sending an old response id */
+ allocateRequest.setResponseId(0);
+ response = amService.allocate(allocateRequest).getAMResponse();
+ Assert.assertTrue(response.getReboot());
+ }
+}
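
The assertions above pin down the allocate response-id contract: a fresh request advances the id, resending the same request replays the previous answer, and a stale id makes the RM ask the AM to reboot. A simplified model of that bookkeeping (not part of this patch; a real implementation would also have to cache the full previous response in order to replay it):

public class ResponseIdTracker {
  private int lastResponseId = 0;

  /** Returns the next response id, -1 for a duplicate, or -2 for reboot. */
  public synchronized int onAllocate(int requestResponseId) {
    if (requestResponseId == lastResponseId) {
      return ++lastResponseId;   // fresh request: advance the id
    } else if (requestResponseId == lastResponseId - 1) {
      return -1;                 // retransmission: replay the last response
    } else {
      return -2;                 // stale id: tell the AM to reboot
    }
  }
}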
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
new file mode 100644
index 0000000..7635f51
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -0,0 +1,292 @@
+package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests that the AM is restarted on failure.
+ */
+public class TestAMRestart {
+// private static final Log LOG = LogFactory.getLog(TestAMRestart.class);
+// ApplicationsManagerImpl appImpl;
+// RMContext asmContext = new RMContextImpl(new MemStore());
+// ApplicationTokenSecretManager appTokenSecretManager =
+// new ApplicationTokenSecretManager();
+// DummyResourceScheduler scheduler;
+// private ClientRMService clientRMService;
+// int count = 0;
+// ApplicationId appID;
+// final int maxFailures = 3;
+// AtomicInteger launchNotify = new AtomicInteger();
+// AtomicInteger schedulerNotify = new AtomicInteger();
+// volatile boolean stop = false;
+// int schedulerAddApplication = 0;
+// int schedulerRemoveApplication = 0;
+// int launcherLaunchCalled = 0;
+// int launcherCleanupCalled = 0;
+// private final static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+//
+// private class ExtApplicationsManagerImpl extends ApplicationsManagerImpl {
+// public ExtApplicationsManagerImpl(
+// ApplicationTokenSecretManager applicationTokenSecretManager,
+// YarnScheduler scheduler, RMContext asmContext) {
+// super(applicationTokenSecretManager, scheduler, asmContext);
+// }
+//
+// @Override
+// public EventHandler<ASMEvent<AMLauncherEventType>> createNewApplicationMasterLauncher(
+// ApplicationTokenSecretManager tokenSecretManager) {
+// return new DummyAMLauncher();
+// }
+// }
+//
+// private class DummyAMLauncher implements EventHandler<ASMEvent<AMLauncherEventType>> {
+//
+// public DummyAMLauncher() {
+// asmContext.getDispatcher().register(AMLauncherEventType.class, this);
+// new Thread() {
+// public void run() {
+// while (!stop) {
+// LOG.info("DEBUG -- waiting for launch");
+// synchronized(launchNotify) {
+// while (launchNotify.get() == 0) {
+// try {
+// launchNotify.wait();
+// } catch (InterruptedException e) {
+// }
+// }
+// asmContext.getDispatcher().getEventHandler().handle(
+// new ApplicationEvent(
+// ApplicationEventType.LAUNCHED, appID));
+// launchNotify.addAndGet(-1);
+// }
+// }
+// }
+// }.start();
+// }
+//
+// @Override
+// public void handle(ASMEvent<AMLauncherEventType> event) {
+// switch (event.getType()) {
+// case CLEANUP:
+// launcherCleanupCalled++;
+// break;
+// case LAUNCH:
+// LOG.info("DEBUG -- launching");
+// launcherLaunchCalled++;
+// synchronized (launchNotify) {
+// launchNotify.addAndGet(1);
+// launchNotify.notify();
+// }
+// break;
+// default:
+// break;
+// }
+// }
+// }
+//
+// private class DummyResourceScheduler implements ResourceScheduler {
+//
+// @Override
+// public void removeNode(RMNode node) {
+// }
+//
+// @Override
+// public Allocation allocate(ApplicationId applicationId,
+// List<ResourceRequest> ask, List<Container> release) throws IOException {
+// Container container = recordFactory.newRecordInstance(Container.class);
+// container.setContainerToken(recordFactory.newRecordInstance(ContainerToken.class));
+// container.setNodeId(recordFactory.newRecordInstance(NodeId.class));
+// container.setContainerManagerAddress("localhost");
+// container.setNodeHttpAddress("localhost:9999");
+// container.setId(recordFactory.newRecordInstance(ContainerId.class));
+// container.getId().setAppId(appID);
+// container.getId().setId(count);
+// count++;
+// return new Allocation(Arrays.asList(container), Resources.none());
+// }
+//
+// @Override
+// public void handle(ASMEvent<ApplicationTrackerEventType> event) {
+// switch (event.getType()) {
+// case ADD:
+// schedulerAddApplication++;
+// break;
+// case EXPIRE:
+// schedulerRemoveApplication++;
+// LOG.info("REMOVING app : " + schedulerRemoveApplication);
+// if (schedulerRemoveApplication == maxFailures) {
+// synchronized (schedulerNotify) {
+// schedulerNotify.addAndGet(1);
+// schedulerNotify.notify();
+// }
+// }
+// break;
+// default:
+// break;
+// }
+// }
+//
+// @Override
+// public QueueInfo getQueueInfo(String queueName,
+// boolean includeChildQueues,
+// boolean recursive) throws IOException {
+// return null;
+// }
+// @Override
+// public List<QueueUserACLInfo> getQueueUserAclInfo() {
+// return null;
+// }
+// @Override
+// public void addApplication(ApplicationId applicationId,
+// ApplicationMaster master, String user, String queue, Priority priority,
+// ApplicationStore store)
+// throws IOException {
+// }
+// @Override
+// public void addNode(RMNode nodeInfo) {
+// }
+// @Override
+// public void recover(RMState state) throws Exception {
+// }
+// @Override
+// public void reinitialize(Configuration conf,
+// ContainerTokenSecretManager secretManager, RMContext rmContext)
+// throws IOException {
+// }
+//
+// @Override
+// public void nodeUpdate(RMNode nodeInfo,
+// Map<String, List<Container>> containers) {
+// }
+//
+// @Override
+// public Resource getMaximumResourceCapability() {
+// // TODO Auto-generated method stub
+// return null;
+// }
+//
+// @Override
+// public Resource getMinimumResourceCapability() {
+// // TODO Auto-generated method stub
+// return null;
+// }
+// }
+//
+// @Before
+// public void setUp() {
+//
+// asmContext.getDispatcher().register(ApplicationEventType.class,
+// new ResourceManager.ApplicationEventDispatcher(asmContext));
+//
+// appID = recordFactory.newRecordInstance(ApplicationId.class);
+// appID.setClusterTimestamp(System.currentTimeMillis());
+// appID.setId(1);
+// Configuration conf = new Configuration();
+// scheduler = new DummyResourceScheduler();
+// asmContext.getDispatcher().init(conf);
+// asmContext.getDispatcher().start();
+// asmContext.getDispatcher().register(ApplicationTrackerEventType.class, scheduler);
+// appImpl = new ExtApplicationsManagerImpl(appTokenSecretManager, scheduler, asmContext);
+//
+// conf.setLong(YarnConfiguration.AM_EXPIRY_INTERVAL, 1000L);
+// conf.setInt(RMConfig.AM_MAX_RETRIES, maxFailures);
+// appImpl.init(conf);
+// appImpl.start();
+//
+// this.clientRMService = new ClientRMService(asmContext, appImpl
+// .getAmLivelinessMonitor(), appImpl.getClientToAMSecretManager(),
+// scheduler);
+// this.clientRMService.init(conf);
+// }
+//
+// @After
+// public void tearDown() {
+// }
+//
+// private void waitForFailed(AppAttempt application, ApplicationState
+// finalState) throws Exception {
+// int count = 0;
+// while(application.getState() != finalState && count < 10) {
+// Thread.sleep(500);
+// count++;
+// }
+// Assert.assertEquals(finalState, application.getState());
+// }
+//
+// @Test
+// public void testAMRestart() throws Exception {
+// ApplicationSubmissionContext subContext = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
+// subContext.setApplicationId(appID);
+// subContext.setApplicationName("dummyApp");
+//// subContext.command = new ArrayList<String>();
+//// subContext.environment = new HashMap<String, String>();
+//// subContext.fsTokens = new ArrayList<String>();
+// subContext.setFsTokensTodo(ByteBuffer.wrap(new byte[0]));
+// SubmitApplicationRequest request = recordFactory
+// .newRecordInstance(SubmitApplicationRequest.class);
+// request.setApplicationSubmissionContext(subContext);
+// clientRMService.submitApplication(request);
+// AppAttempt application = asmContext.getApplications().get(appID);
+// synchronized (schedulerNotify) {
+// while(schedulerNotify.get() == 0) {
+// schedulerNotify.wait();
+// }
+// }
+// Assert.assertEquals(maxFailures, launcherCleanupCalled);
+// Assert.assertEquals(maxFailures, launcherLaunchCalled);
+// Assert.assertEquals(maxFailures, schedulerAddApplication);
+// Assert.assertEquals(maxFailures, schedulerRemoveApplication);
+// Assert.assertEquals(maxFailures, application.getFailedCount());
+// waitForFailed(application, ApplicationState.FAILED);
+// stop = true;
+// }
+}
\ No newline at end of file
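
The commented-out launcher above coordinates its two threads through wait/notify on an AtomicInteger used as both monitor and counter. That handshake, isolated (not part of this patch):

import java.util.concurrent.atomic.AtomicInteger;

public class LaunchHandshake {
  private final AtomicInteger launchNotify = new AtomicInteger();

  // Producer side: record one launch and wake the waiter.
  public void onLaunchEvent() {
    synchronized (launchNotify) {
      launchNotify.incrementAndGet();
      launchNotify.notify();
    }
  }

  // Consumer side: block until at least one launch arrived, then drain it.
  public void awaitLaunch() throws InterruptedException {
    synchronized (launchNotify) {
      while (launchNotify.get() == 0) {
        launchNotify.wait();
      }
      launchNotify.decrementAndGet();
    }
  }
}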
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestASMStateMachine.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestASMStateMachine.java
new file mode 100644
index 0000000..1897508
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestASMStateMachine.java
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestASMStateMachine {
+// private static final Log LOG = LogFactory.getLog(TestASMStateMachine.class);
+// private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+// RMContext context = new RMContextImpl(new MemStore());
+// EventHandler handler;
+// private boolean snreceivedCleanUp = false;
+// private boolean snAllocateReceived = false;
+// private boolean launchCalled = false;
+// private boolean addedApplication = false;
+// private boolean removedApplication = false;
+// private boolean launchCleanupCalled = false;
+// private AtomicInteger waitForState = new AtomicInteger();
+// private Configuration conf = new Configuration();
+// @Before
+// public void setUp() {
+// context.getDispatcher().init(conf);
+// context.getDispatcher().start();
+// handler = context.getDispatcher().getEventHandler();
+// new DummyAMLaunchEventHandler();
+// new DummySNEventHandler();
+// new ApplicationTracker();
+// new MockApplicationMasterInfo();
+// }
+//
+// @After
+// public void tearDown() {
+//
+// }
+//
+// private class DummyAMLaunchEventHandler implements EventHandler<ASMEvent<AMLauncherEventType>> {
+// AppAttempt application;
+// AtomicInteger amsync = new AtomicInteger(0);
+//
+// public DummyAMLaunchEventHandler() {
+// context.getDispatcher().register(AMLauncherEventType.class, this);
+// }
+//
+// @Override
+// public void handle(ASMEvent<AMLauncherEventType> event) {
+// switch(event.getType()) {
+// case LAUNCH:
+// launchCalled = true;
+// application = event.getApplication();
+// context.getDispatcher().getEventHandler().handle(
+// new ApplicationEvent(ApplicationEventType.LAUNCHED,
+// application.getApplicationID()));
+// break;
+// case CLEANUP:
+// launchCleanupCalled = true;
+// break;
+// }
+// }
+// }
+//
+// private class DummySNEventHandler implements EventHandler<ASMEvent<SNEventType>> {
+// AppAttempt application;
+// AtomicInteger snsync = new AtomicInteger(0);
+//
+// public DummySNEventHandler() {
+// context.getDispatcher().register(SNEventType.class, this);
+// }
+//
+// @Override
+// public void handle(ASMEvent<SNEventType> event) {
+// switch(event.getType()) {
+// case RELEASE:
+// snreceivedCleanUp = true;
+// break;
+// case SCHEDULE:
+// snAllocateReceived = true;
+// application = event.getAppAttempt();
+// context.getDispatcher().getEventHandler().handle(
+// new AMAllocatedEvent(application.getApplicationID(),
+// application.getMasterContainer()));
+// break;
+// }
+// }
+//
+// }
+//
+// private class ApplicationTracker implements EventHandler<ASMEvent<ApplicationTrackerEventType>> {
+// public ApplicationTracker() {
+// context.getDispatcher().register(ApplicationTrackerEventType.class, this);
+// }
+//
+// @Override
+// public void handle(ASMEvent<ApplicationTrackerEventType> event) {
+// switch (event.getType()) {
+// case ADD:
+// addedApplication = true;
+// break;
+// case REMOVE:
+// removedApplication = true;
+// break;
+// }
+// }
+// }
+//
+// private class MockApplicationMasterInfo implements
+// EventHandler<ApplicationEvent> {
+//
+// MockApplicationMasterInfo() {
+// context.getDispatcher().register(ApplicationEventType.class, this);
+// }
+// @Override
+// public void handle(ApplicationEvent event) {
+// LOG.info("The event type is " + event.getType());
+// }
+// }
+//
+// private void waitForState( ApplicationState
+// finalState, AppAttemptImpl masterInfo) throws Exception {
+// int count = 0;
+// while(masterInfo.getState() != finalState && count < 10) {
+// Thread.sleep(500);
+// count++;
+// }
+// Assert.assertEquals(finalState, masterInfo.getState());
+// }
+//
+// /* Test the state machine. */
+// @Test
+// public void testStateMachine() throws Exception {
+// ApplicationSubmissionContext submissioncontext = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
+// submissioncontext.setApplicationId(recordFactory.newRecordInstance(ApplicationId.class));
+// submissioncontext.getApplicationId().setId(1);
+// submissioncontext.getApplicationId().setClusterTimestamp(System.currentTimeMillis());
+//
+// AppAttemptImpl masterInfo = new AppAttemptImpl(context,
+// conf, "dummyuser", submissioncontext, "dummyToken", StoreFactory
+// .createVoidAppStore(), new AMLivelinessMonitor(context
+// .getDispatcher().getEventHandler()));
+//
+// context.getDispatcher().register(ApplicationEventType.class, masterInfo);
+// handler.handle(new ApplicationEvent(
+// ApplicationEventType.ALLOCATE, submissioncontext.getApplicationId()));
+//
+// waitForState(ApplicationState.LAUNCHED, masterInfo);
+// Assert.assertTrue(snAllocateReceived);
+// Assert.assertTrue(launchCalled);
+// Assert.assertTrue(addedApplication);
+// handler
+// .handle(new AMRegistrationEvent(masterInfo.getMaster()));
+// waitForState(ApplicationState.RUNNING, masterInfo);
+// Assert.assertEquals(ApplicationState.RUNNING, masterInfo.getState());
+//
+// ApplicationStatus status = recordFactory
+// .newRecordInstance(ApplicationStatus.class);
+// status.setApplicationId(masterInfo.getApplicationID());
+// handler.handle(new AMStatusUpdateEvent(status));
+//
+// /* check if the state is still RUNNING */
+//
+// Assert.assertEquals(ApplicationState.RUNNING, masterInfo.getState());
+//
+// handler.handle(new AMFinishEvent(masterInfo.getApplicationID(),
+// ApplicationState.COMPLETED, "", ""));
+// waitForState(ApplicationState.COMPLETED, masterInfo);
+// Assert.assertEquals(ApplicationState.COMPLETED, masterInfo.getState());
+// /* check if clean up is called for everyone */
+// Assert.assertTrue(launchCleanupCalled);
+// Assert.assertTrue(snreceivedCleanUp);
+// Assert.assertTrue(removedApplication);
+//
+// /* check that expiry does not mark it as failed */
+// handler.handle(new ApplicationEvent(ApplicationEventType.EXPIRE,
+// masterInfo.getApplicationID()));
+// Assert.assertEquals(ApplicationState.COMPLETED, masterInfo.getState());
+// }
+}
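
The disabled test drives one attempt through allocation, launch, registration, a status update, and a finish event, then checks that a late EXPIRE leaves the terminal state untouched. A simplified model of that transition sequence (not part of this patch; state and event names are illustrative, not the real ASM types):

public enum AppLifecycle {
  ALLOCATING, LAUNCHING, LAUNCHED, RUNNING, COMPLETED;

  public AppLifecycle onEvent(String event) {
    switch (this) {
      case ALLOCATING: return "ALLOCATED".equals(event) ? LAUNCHING : this;
      case LAUNCHING:  return "LAUNCHED".equals(event) ? LAUNCHED : this;
      case LAUNCHED:   return "REGISTERED".equals(event) ? RUNNING : this;
      case RUNNING:    return "FINISH".equals(event) ? COMPLETED : this;
      case COMPLETED:  return this; // terminal: EXPIRE and friends ignored
      default:         return this;
    }
  }
}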
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationCleanup.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationCleanup.java
new file mode 100644
index 0000000..9dc79b0
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationCleanup.java
@@ -0,0 +1,278 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+/**
+ * Tests application cleanup (the notifications sent to node managers).
+ */
+@Ignore
+public class TestApplicationCleanup {
+// private static final Log LOG = LogFactory.getLog(TestApplicationCleanup.class);
+// private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+// private AtomicInteger waitForState = new AtomicInteger(0);
+// private ResourceScheduler scheduler;
+// private final int memoryCapability = 1024;
+// private ExtASM asm;
+// private static final int memoryNeeded = 100;
+//
+// private final RMContext context = new RMContextImpl(new MemStore());
+// private ClientRMService clientService;
+//
+// @Before
+// public void setUp() {
+// new DummyApplicationTracker();
+// scheduler = new FifoScheduler();
+// context.getDispatcher().register(ApplicationTrackerEventType.class, scheduler);
+// Configuration conf = new Configuration();
+// context.getDispatcher().init(conf);
+// context.getDispatcher().start();
+// asm = new ExtASM(new ApplicationTokenSecretManager(), scheduler);
+// asm.init(conf);
+// clientService = new ClientRMService(context,
+// asm.getAmLivelinessMonitor(), asm.getClientToAMSecretManager(),
+// scheduler);
+// }
+//
+// @After
+// public void tearDown() {
+//
+// }
+//
+//
+// private class DummyApplicationTracker implements EventHandler<ASMEvent
+// <ApplicationTrackerEventType>> {
+//
+// public DummyApplicationTracker() {
+// context.getDispatcher().register(ApplicationTrackerEventType.class, this);
+// }
+// @Override
+// public void handle(ASMEvent<ApplicationTrackerEventType> event) {
+// }
+//
+// }
+// private class ExtASM extends ApplicationsManagerImpl {
+// boolean schedulerCleanupCalled = false;
+// boolean launcherLaunchCalled = false;
+// boolean launcherCleanupCalled = false;
+// boolean schedulerScheduleCalled = false;
+//
+// private class DummyApplicationMasterLauncher implements EventHandler<ASMEvent<AMLauncherEventType>> {
+// private AtomicInteger notify = new AtomicInteger(0);
+// private AppAttempt application;
+//
+// public DummyApplicationMasterLauncher(RMContext context) {
+// context.getDispatcher().register(AMLauncherEventType.class, this);
+// }
+//
+// @Override
+// public void handle(ASMEvent<AMLauncherEventType> appEvent) {
+// AMLauncherEventType event = appEvent.getType();
+// switch (event) {
+// case CLEANUP:
+// launcherCleanupCalled = true;
+// break;
+// case LAUNCH:
+// LOG.info("Launcher Launch called");
+// launcherLaunchCalled = true;
+// application = appEvent.getApplication();
+// context.getDispatcher().getEventHandler().handle(
+// new ApplicationEvent(ApplicationEventType.LAUNCHED,
+// application.getApplicationID()));
+// break;
+// default:
+// break;
+// }
+// }
+// }
+//
+// private class DummySchedulerNegotiator implements EventHandler<ASMEvent<SNEventType>> {
+// private AtomicInteger snnotify = new AtomicInteger(0);
+// AppAttempt application;
+// public DummySchedulerNegotiator(RMContext context) {
+// context.getDispatcher().register(SNEventType.class, this);
+// }
+//
+// @Override
+// public void handle(ASMEvent<SNEventType> appEvent) {
+// SNEventType event = appEvent.getType();
+// switch (event) {
+// case RELEASE:
+// schedulerCleanupCalled = true;
+// break;
+// case SCHEDULE:
+// schedulerScheduleCalled = true;
+// application = appEvent.getAppAttempt();
+// context.getDispatcher().getEventHandler().handle(
+// new AMAllocatedEvent(application.getApplicationID(),
+// application.getMasterContainer()));
+// default:
+// break;
+// }
+// }
+//
+// }
+// public ExtASM(ApplicationTokenSecretManager applicationTokenSecretManager,
+// YarnScheduler scheduler) {
+// super(applicationTokenSecretManager, scheduler, context);
+// }
+//
+// @Override
+// protected EventHandler<ASMEvent<SNEventType>> createNewSchedulerNegotiator(
+// YarnScheduler scheduler) {
+// return new DummySchedulerNegotiator(context);
+// }
+//
+// @Override
+// protected EventHandler<ASMEvent<AMLauncherEventType>> createNewApplicationMasterLauncher(
+// ApplicationTokenSecretManager tokenSecretManager) {
+// return new DummyApplicationMasterLauncher(context);
+// }
+//
+// }
+//
+// private void waitForState(ApplicationState
+// finalState, AppAttempt application) throws Exception {
+// int count = 0;
+// while(application.getState() != finalState && count < 10) {
+// Thread.sleep(500);
+// count++;
+// }
+// Assert.assertEquals(finalState, application.getState());
+// }
+//
+//
+// private ResourceRequest createNewResourceRequest(int capability, int i) {
+// ResourceRequest request = recordFactory.newRecordInstance(ResourceRequest.class);
+// request.setCapability(recordFactory.newRecordInstance(Resource.class));
+// request.getCapability().setMemory(capability);
+// request.setNumContainers(1);
+// request.setPriority(recordFactory.newRecordInstance(Priority.class));
+// request.getPriority().setPriority(i);
+// request.setHostName("*");
+// return request;
+// }
+//
+// protected RMNode addNodes(String commonName, int i, int memoryCapability) throws IOException {
+// NodeId nodeId = recordFactory.newRecordInstance(NodeId.class);
+// nodeId.setId(i);
+// String hostName = commonName + "_" + i;
+// Node node = new NodeBase(hostName, NetworkTopology.DEFAULT_RACK);
+// Resource capability = recordFactory.newRecordInstance(Resource.class);
+// capability.setMemory(memoryCapability);
+// return new RMNodeImpl(nodeId, hostName, i, -i, node, capability);
+// }
+//
+// @Test
+// public void testApplicationCleanUp() throws Exception {
+// ApplicationId appID = clientService.getNewApplicationId();
+// ApplicationSubmissionContext submissionContext = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
+// submissionContext.setApplicationId(appID);
+// submissionContext.setQueue("queuename");
+// submissionContext.setUser("dummyuser");
+// SubmitApplicationRequest request = recordFactory
+// .newRecordInstance(SubmitApplicationRequest.class);
+// request.setApplicationSubmissionContext(submissionContext);
+// clientService.submitApplication(request);
+// waitForState(ApplicationState.LAUNCHED, context.getApplications().get(
+// appID));
+// List<ResourceRequest> reqs = new ArrayList<ResourceRequest>();
+// ResourceRequest req = createNewResourceRequest(100, 1);
+// reqs.add(req);
+// reqs.add(createNewResourceRequest(memoryNeeded, 2));
+// List<Container> release = new ArrayList<Container>();
+// scheduler.allocate(appID, reqs, release);
+// ArrayList<RMNode> nodesAdded = new ArrayList<RMNode>();
+// for (int i = 0; i < 10; i++) {
+// nodesAdded.add(addNodes("localhost", i, memoryCapability));
+// }
+// /* let one node heartbeat */
+// Map<String, List<Container>> containers = new HashMap<String, List<Container>>();
+// RMNode firstNode = nodesAdded.get(0);
+// int firstNodeMemory = firstNode.getAvailableResource().getMemory();
+// RMNode secondNode = nodesAdded.get(1);
+//
+// context.getNodesCollection().updateListener(firstNode, containers);
+// context.getNodesCollection().updateListener(secondNode, containers);
+// LOG.info("Available resource on first node" + firstNode.getAvailableResource());
+// LOG.info("Available resource on second node" + secondNode.getAvailableResource());
+// /* only allocate the containers to the first node */
+// Assert.assertEquals((firstNodeMemory - (2 * memoryNeeded)), firstNode
+// .getAvailableResource().getMemory());
+// context.getDispatcher().getEventHandler().handle(
+// new ApplicationEvent(ApplicationEventType.KILL, appID));
+// while (asm.launcherCleanupCalled != true) {
+// Thread.sleep(500);
+// }
+// Assert.assertTrue(asm.launcherCleanupCalled);
+// Assert.assertTrue(asm.launcherLaunchCalled);
+// Assert.assertTrue(asm.schedulerCleanupCalled);
+// Assert.assertTrue(asm.schedulerScheduleCalled);
+// /* check for update of completed application */
+// context.getNodesCollection().updateListener(firstNode, containers);
+// NodeResponse response = firstNode.statusUpdate(containers);
+// Assert.assertTrue(response.getFinishedApplications().contains(appID));
+// LOG.info("The containers to clean up " + response.getContainersToCleanUp().size());
+// Assert.assertEquals(2, response.getContainersToCleanUp().size());
+// }
+}
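
The cleanup test expects that killing an application queues its containers for cleanup and that each node learns about them, along with the finished application, on its next status update. A toy ledger capturing that idea (not part of this patch; the real flow goes through RMNode and NodeResponse):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CleanupLedger {
  // node -> containers that must be cleaned on its next heartbeat
  private final Map<String, List<String>> toCleanup =
      new HashMap<String, List<String>>();

  public synchronized void appKilled(String node, List<String> containers) {
    List<String> pending = toCleanup.get(node);
    if (pending == null) {
      pending = new ArrayList<String>();
      toCleanup.put(node, pending);
    }
    pending.addAll(containers);
  }

  /** Drained into the node's next heartbeat response. */
  public synchronized List<String> drain(String node) {
    List<String> pending = toCleanup.remove(node);
    return pending == null ? new ArrayList<String>() : pending;
  }
}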
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterExpiry.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterExpiry.java
new file mode 100644
index 0000000..ff4e798
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterExpiry.java
@@ -0,0 +1,173 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests expiry of the application master. More cases can be added later.
+ */
+public class TestApplicationMasterExpiry {
+// private static final Log LOG = LogFactory.getLog(TestApplicationMasterExpiry.class);
+// private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+//
+// private final RMContext context = new RMContextImpl(new MemStore());
+// private AMLivelinessMonitor amLivelinessMonitor;
+//
+// @Before
+// public void setUp() {
+// new DummyApplicationTracker();
+// new DummySN();
+// new DummyLauncher();
+// new ApplicationEventTypeListener();
+// Configuration conf = new Configuration();
+// context.getDispatcher().register(ApplicationEventType.class,
+// new ResourceManager.ApplicationEventDispatcher(context));
+// context.getDispatcher().init(conf);
+// context.getDispatcher().start();
+// conf.setLong(YarnConfiguration.AM_EXPIRY_INTERVAL, 1000L);
+// amLivelinessMonitor = new AMLivelinessMonitor(this.context
+// .getDispatcher().getEventHandler());
+// amLivelinessMonitor.init(conf);
+// amLivelinessMonitor.start();
+// }
+//
+// private class DummyApplicationTracker implements EventHandler<ASMEvent<ApplicationTrackerEventType>> {
+// DummyApplicationTracker() {
+// context.getDispatcher().register(ApplicationTrackerEventType.class, this);
+// }
+// @Override
+// public void handle(ASMEvent<ApplicationTrackerEventType> event) {
+// }
+// }
+//
+// private AtomicInteger expiry = new AtomicInteger();
+// private boolean expired = false;
+//
+// private class ApplicationEventTypeListener implements
+// EventHandler<ApplicationEvent> {
+// ApplicationEventTypeListener() {
+// context.getDispatcher().register(ApplicationEventType.class, this);
+// }
+// @Override
+// public void handle(ApplicationEvent event) {
+// switch(event.getType()) {
+// case EXPIRE:
+// expired = true;
+// LOG.info("Received expiry from application " + event.getApplicationId());
+// synchronized(expiry) {
+// expiry.addAndGet(1);
+// }
+// }
+// }
+// }
+//
+// private class DummySN implements EventHandler<ASMEvent<SNEventType>> {
+// DummySN() {
+// context.getDispatcher().register(SNEventType.class, this);
+// }
+// @Override
+// public void handle(ASMEvent<SNEventType> event) {
+// }
+// }
+//
+// private class DummyLauncher implements EventHandler<ASMEvent<AMLauncherEventType>> {
+// DummyLauncher() {
+// context.getDispatcher().register(AMLauncherEventType.class, this);
+// }
+// @Override
+// public void handle(ASMEvent<AMLauncherEventType> event) {
+// }
+// }
+//
+// private void waitForState(AppAttempt application, ApplicationState
+// finalState) throws Exception {
+// int count = 0;
+// while(application.getState() != finalState && count < 10) {
+// Thread.sleep(500);
+// count++;
+// }
+// Assert.assertEquals(finalState, application.getState());
+// }
+//
+// @Test
+// public void testAMExpiry() throws Exception {
+// ApplicationSubmissionContext submissionContext = recordFactory
+// .newRecordInstance(ApplicationSubmissionContext.class);
+// submissionContext.setApplicationId(recordFactory
+// .newRecordInstance(ApplicationId.class));
+// submissionContext.getApplicationId().setClusterTimestamp(
+// System.currentTimeMillis());
+// submissionContext.getApplicationId().setId(1);
+//
+// ApplicationStore appStore = context.getApplicationsStore()
+// .createApplicationStore(submissionContext.getApplicationId(),
+// submissionContext);
+// AppAttempt application = new AppAttemptImpl(context,
+// new Configuration(), "dummy", submissionContext, "dummytoken", appStore,
+// amLivelinessMonitor);
+// context.getApplications()
+// .put(application.getApplicationID(), application);
+//
+// this.context.getDispatcher().getSyncHandler().handle(
+// new ApplicationEvent(ApplicationEventType.ALLOCATE, submissionContext
+// .getApplicationId()));
+//
+// waitForState(application, ApplicationState.ALLOCATING);
+//
+// this.context.getDispatcher().getEventHandler().handle(
+// new AMAllocatedEvent(application.getApplicationID(),
+// application.getMasterContainer()));
+//
+// waitForState(application, ApplicationState.LAUNCHING);
+//
+// this.context.getDispatcher().getEventHandler().handle(
+// new ApplicationEvent(ApplicationEventType.LAUNCHED,
+// application.getApplicationID()));
+// synchronized(expiry) {
+// while (expiry.get() == 0) {
+// expiry.wait(1000);
+// }
+// }
+// Assert.assertTrue(expired);
+// }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterLauncher.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterLauncher.java
new file mode 100644
index 0000000..8cc9484
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterLauncher.java
@@ -0,0 +1,193 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests the application master launcher.
+ */
+public class TestApplicationMasterLauncher {
+// private static final Log LOG = LogFactory.getLog(TestApplicationMasterLauncher.class);
+// private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+// private ApplicationMasterLauncher amLauncher;
+// private DummyEventHandler asmHandle;
+// private final ApplicationTokenSecretManager applicationTokenSecretManager =
+// new ApplicationTokenSecretManager();
+// private final ClientToAMSecretManager clientToAMSecretManager =
+// new ClientToAMSecretManager();
+//
+// Object doneLaunching = new Object();
+// AtomicInteger launched = new AtomicInteger();
+// AtomicInteger cleanedUp = new AtomicInteger();
+// private RMContext context = new RMContextImpl(new MemStore(), null, null,
+// null);
+//
+// private Configuration conf = new Configuration();
+//
+// private class DummyEventHandler implements EventHandler<ApplicationEvent> {
+// @Override
+// public void handle(ApplicationEvent appEvent) {
+// ApplicationEventType event = appEvent.getType();
+// switch (event) {
+// case FINISH:
+// synchronized(doneLaunching) {
+// doneLaunching.notify();
+// }
+// break;
+//
+// default:
+// break;
+// }
+// }
+// }
+//
+// private class DummyLaunch implements Runnable {
+// public void run() {
+// launched.incrementAndGet();
+// }
+// }
+//
+// private class DummyCleanUp implements Runnable {
+// private EventHandler eventHandler;
+//
+// public DummyCleanUp(EventHandler eventHandler) {
+// this.eventHandler = eventHandler;
+// }
+// public void run() {
+// cleanedUp.incrementAndGet();
+// eventHandler.handle(new AMFinishEvent(null,
+// ApplicationState.COMPLETED, "", ""));
+// }
+// }
+//
+// private class DummyApplicationMasterLauncher extends
+// ApplicationMasterLauncher {
+// private EventHandler eventHandler;
+//
+// public DummyApplicationMasterLauncher(
+// ApplicationTokenSecretManager applicationTokenSecretManager,
+// ClientToAMSecretManager clientToAMSecretManager,
+// EventHandler eventHandler) {
+// super(applicationTokenSecretManager, clientToAMSecretManager, context);
+// this.eventHandler = eventHandler;
+// }
+//
+// @Override
+// protected Runnable createRunnableLauncher(RMAppAttempt application,
+// AMLauncherEventType event) {
+// Runnable r = null;
+// switch (event) {
+// case LAUNCH:
+// r = new DummyLaunch();
+// break;
+// case CLEANUP:
+// r = new DummyCleanUp(eventHandler);
+// default:
+// break;
+// }
+// return r;
+// }
+// }
+//
+// @Before
+// public void setUp() {
+// asmHandle = new DummyEventHandler();
+// amLauncher = new DummyApplicationMasterLauncher(applicationTokenSecretManager,
+// clientToAMSecretManager, asmHandle);
+// context.getDispatcher().init(conf);
+// amLauncher.init(conf);
+// context.getDispatcher().start();
+// amLauncher.start();
+//
+// }
+//
+// @After
+// public void tearDown() {
+// amLauncher.stop();
+// }
+//
+// @Test
+// public void testAMLauncher() throws Exception {
+//
+//    // Create AppId
+// ApplicationId appId = recordFactory
+// .newRecordInstance(ApplicationId.class);
+// appId.setClusterTimestamp(System.currentTimeMillis());
+// appId.setId(1);
+//
+// ApplicationAttemptId appAttemptId = Records
+// .newRecord(ApplicationAttemptId.class);
+// appAttemptId.setApplicationId(appId);
+// appAttemptId.setAttemptId(1);
+//
+// // Create submissionContext
+// ApplicationSubmissionContext submissionContext = recordFactory
+// .newRecordInstance(ApplicationSubmissionContext.class);
+// submissionContext.setApplicationId(appId);
+// submissionContext.setUser("dummyuser");
+//
+// RMAppAttempt appAttempt = new RMAppAttemptImpl(appAttemptId,
+// "dummyclienttoken", context, null, submissionContext);
+//
+// // Tell AMLauncher to launch the appAttempt
+// amLauncher.handle(new AMLauncherEvent(AMLauncherEventType.LAUNCH,
+// appAttempt));
+//
+// // Tell AMLauncher to cleanup the appAttempt
+// amLauncher.handle(new AMLauncherEvent(AMLauncherEventType.CLEANUP,
+// appAttempt));
+//
+// synchronized (doneLaunching) {
+// doneLaunching.wait(10000);
+// }
+// Assert.assertEquals(1, launched.get());
+// Assert.assertEquals(1, cleanedUp.get());
+// }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
new file mode 100644
index 0000000..459db70
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
@@ -0,0 +1,202 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestSchedulerNegotiator {
+// private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+// private SchedulerNegotiator schedulerNegotiator;
+// private DummyScheduler scheduler;
+// private final int testNum = 99999;
+//
+// private final RMContext context = new RMContextImpl(new MemStore());
+// AppAttemptImpl masterInfo;
+// private EventHandler handler;
+// private Configuration conf = new Configuration();
+// private class DummyScheduler implements ResourceScheduler {
+// @Override
+// public Allocation allocate(ApplicationId applicationId,
+// List<ResourceRequest> ask, List<Container> release) throws IOException {
+// ArrayList<Container> containers = new ArrayList<Container>();
+// Container container = recordFactory.newRecordInstance(Container.class);
+// container.setId(recordFactory.newRecordInstance(ContainerId.class));
+// container.getId().setAppId(applicationId);
+// container.getId().setId(testNum);
+// containers.add(container);
+// return new Allocation(containers, Resources.none());
+// }
+//
+//
+// @Override
+// public void nodeUpdate(RMNode nodeInfo,
+// Map<String, List<Container>> containers) {
+// }
+//
+// @Override
+// public void removeNode(RMNode node) {
+// }
+//
+// @Override
+// public void handle(ASMEvent<ApplicationTrackerEventType> event) {
+// }
+//
+// @Override
+// public QueueInfo getQueueInfo(String queueName,
+// boolean includeChildQueues,
+// boolean recursive) throws IOException {
+// return null;
+// }
+// @Override
+// public List<QueueUserACLInfo> getQueueUserAclInfo() {
+// return null;
+// }
+// @Override
+// public void addApplication(ApplicationId applicationId,
+// ApplicationMaster master, String user, String queue, Priority priority,
+// ApplicationStore store)
+// throws IOException {
+// }
+//
+//
+// @Override
+// public void addNode(RMNode nodeInfo) {
+// }
+//
+//
+// @Override
+// public void recover(RMState state) throws Exception {
+// }
+//
+//
+// @Override
+// public void reinitialize(Configuration conf,
+// ContainerTokenSecretManager secretManager, RMContext rmContext)
+// throws IOException {
+// }
+//
+//
+// @Override
+// public Resource getMaximumResourceCapability() {
+// // TODO Auto-generated method stub
+// return null;
+// }
+//
+//
+// @Override
+// public Resource getMinimumResourceCapability() {
+// // TODO Auto-generated method stub
+// return null;
+// }
+// }
+//
+// @Before
+// public void setUp() {
+// scheduler = new DummyScheduler();
+// schedulerNegotiator = new SchedulerNegotiator(context, scheduler);
+// schedulerNegotiator.init(conf);
+// schedulerNegotiator.start();
+// handler = context.getDispatcher().getEventHandler();
+// context.getDispatcher().init(conf);
+// context.getDispatcher().start();
+// }
+//
+// @After
+// public void tearDown() {
+// schedulerNegotiator.stop();
+// }
+//
+// public void waitForState(ApplicationState state, AppAttemptImpl info) {
+// int count = 0;
+// while (info.getState() != state && count < 100) {
+// try {
+// Thread.sleep(50);
+// } catch (InterruptedException e) {
+// e.printStackTrace();
+// }
+// count++;
+// }
+// Assert.assertEquals(state, info.getState());
+// }
+//
+// private class DummyEventHandler implements EventHandler<ASMEvent<AMLauncherEventType>> {
+// @Override
+// public void handle(ASMEvent<AMLauncherEventType> event) {
+// }
+// }
+//
+// @Test
+// public void testSchedulerNegotiator() throws Exception {
+// ApplicationSubmissionContext submissionContext = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
+// submissionContext.setApplicationId(recordFactory.newRecordInstance(ApplicationId.class));
+// submissionContext.getApplicationId().setClusterTimestamp(System.currentTimeMillis());
+// submissionContext.getApplicationId().setId(1);
+//
+// masterInfo = new AppAttemptImpl(this.context, this.conf, "dummy",
+// submissionContext, "dummyClientToken", StoreFactory
+// .createVoidAppStore(), new AMLivelinessMonitor(context
+// .getDispatcher().getEventHandler()));
+// context.getDispatcher().register(ApplicationEventType.class, masterInfo);
+// context.getDispatcher().register(ApplicationTrackerEventType.class, scheduler);
+// context.getDispatcher().register(AMLauncherEventType.class,
+// new DummyEventHandler());
+// handler.handle(new ApplicationEvent(
+// ApplicationEventType.ALLOCATE, submissionContext.getApplicationId()));
+//    waitForState(ApplicationState.LAUNCHING, masterInfo); // LAUNCHING because ALLOCATED automatically moves to LAUNCHING for now.
+// Container container = masterInfo.getMasterContainer();
+// Assert.assertTrue(container.getId().getId() == testNum);
+// }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java
new file mode 100644
index 0000000..4f1e5cb
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java
@@ -0,0 +1,32 @@
+package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker;
+
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.event.EventHandler;
+
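+/**
+ * A dispatcher for tests that delivers each event synchronously on the
+ * calling thread (instead of queueing it like AsyncDispatcher), so a test
+ * can assert on state immediately after handing off an event. A minimal
+ * usage sketch (handler and event names are placeholders):
+ *
+ *   Dispatcher dispatcher = new InlineDispatcher();
+ *   dispatcher.register(SomeEventType.class, handler);
+ *   dispatcher.getEventHandler().handle(event); // handled before this returns
+ */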
+class InlineDispatcher extends AsyncDispatcher {
+ private class InlineEventHandler implements EventHandler {
+ private final InlineDispatcher dispatcher;
+ public InlineEventHandler(InlineDispatcher dispatcher) {
+ this.dispatcher = dispatcher;
+ }
+ @Override
+ public void handle(Event event) {
+ this.dispatcher.dispatch(event);
+ }
+ }
+ public void dispatch(Event event) {
+ super.dispatch(event);
+ }
+ @Override
+ public EventHandler getEventHandler() {
+ return new InlineEventHandler(this);
+ }
+
+ static class EmptyEventHandler implements EventHandler<Event> {
+ @Override
+ public void handle(Event event) {
+      // ignore
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java
new file mode 100644
index 0000000..5885d95a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.NMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestNMExpiry {
+ private static final Log LOG = LogFactory.getLog(TestNMExpiry.class);
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+
+ ResourceTrackerService resourceTrackerService;
+ ContainerTokenSecretManager containerTokenSecretManager =
+ new ContainerTokenSecretManager();
+ AtomicInteger test = new AtomicInteger();
+ AtomicInteger notify = new AtomicInteger();
+
+ private class TestNmLivelinessMonitor extends NMLivelinessMonitor {
+ public TestNmLivelinessMonitor(Dispatcher dispatcher) {
+ super(dispatcher);
+ }
+
+ @Override
+ public void init(Configuration conf) {
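+      // use a short (1 second) expiry interval so expirations fire quickly in this test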
+ conf.setLong(RMConfig.NM_EXPIRY_INTERVAL, 1000);
+ super.init(conf);
+ }
+ @Override
+ protected void expire(NodeId id) {
+ LOG.info("Expired " + id);
+ if (test.addAndGet(1) == 2) {
+ try {
+          /* delay at least 2 seconds to make sure the 3rd node does not expire */
+ Thread.sleep(2000);
+ } catch(InterruptedException ie){}
+ synchronized(notify) {
+ notify.addAndGet(1);
+ notify.notifyAll();
+ }
+ }
+ }
+ }
+
+ @Before
+ public void setUp() {
+ Configuration conf = new Configuration();
+ // Dispatcher that processes events inline
+ Dispatcher dispatcher = new InlineDispatcher();
+ dispatcher.register(SchedulerEventType.class,
+ new InlineDispatcher.EmptyEventHandler());
+ dispatcher.register(RMNodeEventType.class,
+ new InlineDispatcher.EmptyEventHandler());
+ RMContext context = new RMContextImpl(new MemStore(), dispatcher, null,
+ null);
+ NMLivelinessMonitor nmLivelinessMonitor = new TestNmLivelinessMonitor(
+ dispatcher);
+ nmLivelinessMonitor.init(conf);
+ nmLivelinessMonitor.start();
+ NodesListManager nodesListManager = new NodesListManager();
+ nodesListManager.init(conf);
+ resourceTrackerService = new ResourceTrackerService(context,
+ nodesListManager, nmLivelinessMonitor, containerTokenSecretManager);
+
+ resourceTrackerService.init(conf);
+ resourceTrackerService.start();
+ }
+
+ private class ThirdNodeHeartBeatThread extends Thread {
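+    // keeps the third registered node alive by heartbeating once a second until stopT is set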
+ public void run() {
+ int lastResponseID = 0;
+ while (!stopT) {
+ try {
+ org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus =
+ recordFactory
+ .newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
+ nodeStatus.setNodeId(request3.getNodeId());
+ nodeStatus.setResponseId(lastResponseID);
+ nodeStatus.setNodeHealthStatus(recordFactory.newRecordInstance(NodeHealthStatus.class));
+ nodeStatus.getNodeHealthStatus().setIsNodeHealthy(true);
+
+ NodeHeartbeatRequest request = recordFactory
+ .newRecordInstance(NodeHeartbeatRequest.class);
+ request.setNodeStatus(nodeStatus);
+ lastResponseID = resourceTrackerService.nodeHeartbeat(request)
+ .getHeartbeatResponse().getResponseId();
+
+ Thread.sleep(1000);
+ } catch(Exception e) {
+ LOG.info("failed to heartbeat ", e);
+ }
+ }
+ }
+ }
+
+ boolean stopT = false;
+ RegisterNodeManagerRequest request3;
+
+ @Test
+ public void testNMExpiry() throws Exception {
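+    // Register three node managers; only the third one heartbeats, so the
+    // first two should expire while the third stays alive.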
+ String hostname1 = "localhost1";
+ String hostname2 = "localhost2";
+ String hostname3 = "localhost3";
+ Resource capability = recordFactory.newRecordInstance(Resource.class);
+
+ RegisterNodeManagerRequest request1 = recordFactory
+ .newRecordInstance(RegisterNodeManagerRequest.class);
+ NodeId nodeId1 = Records.newRecord(NodeId.class);
+ nodeId1.setPort(0);
+ nodeId1.setHost(hostname1);
+ request1.setNodeId(nodeId1);
+ request1.setHttpPort(0);
+ request1.setResource(capability);
+ resourceTrackerService.registerNodeManager(request1);
+
+ RegisterNodeManagerRequest request2 = recordFactory
+ .newRecordInstance(RegisterNodeManagerRequest.class);
+ NodeId nodeId2 = Records.newRecord(NodeId.class);
+ nodeId2.setPort(0);
+ nodeId2.setHost(hostname2);
+ request2.setNodeId(nodeId2);
+ request2.setHttpPort(0);
+ request2.setResource(capability);
+ resourceTrackerService.registerNodeManager(request2);
+
+ request3 = recordFactory
+ .newRecordInstance(RegisterNodeManagerRequest.class);
+ NodeId nodeId3 = Records.newRecord(NodeId.class);
+ nodeId3.setPort(0);
+ nodeId3.setHost(hostname3);
+ request3.setNodeId(nodeId3);
+ request3.setHttpPort(0);
+ request3.setResource(capability);
+ RegistrationResponse thirdNodeRegResponse = resourceTrackerService
+ .registerNodeManager(request3).getRegistrationResponse();
+
+    /* test that hostname 3 does not expire */
+ stopT = false;
+ new ThirdNodeHeartBeatThread().start();
+ int timeOut = 0;
+ synchronized (notify) {
+ while (notify.get() == 0 && timeOut++ < 30) {
+ notify.wait(1000);
+ }
+ }
+ Assert.assertEquals(2, test.get());
+
+ stopT = true;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
new file mode 100644
index 0000000..5cdd3b3
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
@@ -0,0 +1,135 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.NMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestRMNMRPCResponseId {
+ private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ ResourceTrackerService resourceTrackerService;
+ ContainerTokenSecretManager containerTokenSecretManager =
+ new ContainerTokenSecretManager();
+ private NodeId nodeId;
+
+ @Before
+ public void setUp() {
+ // Dispatcher that processes events inline
+ Dispatcher dispatcher = new InlineDispatcher();
+ dispatcher.register(SchedulerEventType.class, new EventHandler<Event>() {
+ @Override
+ public void handle(Event event) {
+        // ignore
+ }
+ });
+ RMContext context = new RMContextImpl(new MemStore(), dispatcher, null,
+ null);
+ dispatcher.register(RMNodeEventType.class,
+ new ResourceManager.NodeEventDispatcher(context));
+ NodesListManager nodesListManager = new NodesListManager();
+ Configuration conf = new Configuration();
+ nodesListManager.init(conf);
+ resourceTrackerService = new ResourceTrackerService(context,
+ nodesListManager, new NMLivelinessMonitor(dispatcher),
+ containerTokenSecretManager);
+ resourceTrackerService.init(conf);
+ }
+
+ @After
+ public void tearDown() {
+ /* do nothing */
+ }
+
+ @Test
+ public void testRPCResponseId() throws IOException {
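+    // The RM tracks the last response id per node: each new heartbeat bumps
+    // the id, a heartbeat repeating the previous id is treated as a retry
+    // and gets the same response id back, and an out-of-sync id makes the
+    // RM ask the node to reboot.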
+ String node = "localhost";
+ Resource capability = recordFactory.newRecordInstance(Resource.class);
+ RegisterNodeManagerRequest request = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
+ nodeId = Records.newRecord(NodeId.class);
+ nodeId.setHost(node);
+ nodeId.setPort(1234);
+ request.setNodeId(nodeId);
+ request.setHttpPort(0);
+ request.setResource(capability);
+
+ RegisterNodeManagerRequest request1 = recordFactory
+ .newRecordInstance(RegisterNodeManagerRequest.class);
+ request1.setNodeId(nodeId);
+ request1.setHttpPort(0);
+ request1.setResource(capability);
+ resourceTrackerService.registerNodeManager(request1);
+
+ org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus = recordFactory.
+ newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
+ nodeStatus.setNodeId(nodeId);
+ NodeHealthStatus nodeHealthStatus = recordFactory.newRecordInstance(NodeHealthStatus.class);
+ nodeHealthStatus.setIsNodeHealthy(true);
+ nodeStatus.setNodeHealthStatus(nodeHealthStatus);
+ NodeHeartbeatRequest nodeHeartBeatRequest = recordFactory
+ .newRecordInstance(NodeHeartbeatRequest.class);
+ nodeHeartBeatRequest.setNodeStatus(nodeStatus);
+
+ nodeStatus.setResponseId(0);
+ HeartbeatResponse response = resourceTrackerService.nodeHeartbeat(
+ nodeHeartBeatRequest).getHeartbeatResponse();
+ Assert.assertTrue(response.getResponseId() == 1);
+
+ nodeStatus.setResponseId(response.getResponseId());
+ response = resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest)
+ .getHeartbeatResponse();
+ Assert.assertTrue(response.getResponseId() == 2);
+
+    /* try calling with a stale (lower) response id */
+ response = resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest)
+ .getHeartbeatResponse();
+ Assert.assertTrue(response.getResponseId() == 2);
+
+ nodeStatus.setResponseId(0);
+ response = resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest)
+ .getHeartbeatResponse();
+    Assert.assertTrue(response.getReboot());
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
new file mode 100644
index 0000000..a489ced
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -0,0 +1,403 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import static org.mockito.Mockito.*;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.MockApps;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+
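+/**
+ * Walks an RMApp through its state machine (NEW -> SUBMITTED -> ACCEPTED ->
+ * RUNNING -> FINISHED/FAILED/KILLED) and verifies the expected state and
+ * timestamps after each event.
+ */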
+public class TestRMAppTransitions {
+ private static final Log LOG = LogFactory.getLog(TestRMAppTransitions.class);
+
+ private RMContext rmContext;
+ private static int maxRetries = 4;
+ private static int appId = 1;
+
+ // ignore all the RM application attempt events
+ private static final class TestApplicationAttemptEventDispatcher implements
+ EventHandler<RMAppAttemptEvent> {
+
+ public TestApplicationAttemptEventDispatcher() {
+ }
+
+ @Override
+ public void handle(RMAppAttemptEvent event) {
+ }
+ }
+
+ // handle all the RM application events - same as in ResourceManager.java
+ private static final class TestApplicationEventDispatcher implements
+ EventHandler<RMAppEvent> {
+
+ private final RMContext rmContext;
+ public TestApplicationEventDispatcher(RMContext rmContext) {
+ this.rmContext = rmContext;
+ }
+
+ @Override
+ public void handle(RMAppEvent event) {
+ ApplicationId appID = event.getApplicationId();
+ RMApp rmApp = this.rmContext.getRMApps().get(appID);
+ if (rmApp != null) {
+ try {
+ rmApp.handle(event);
+ } catch (Throwable t) {
+ LOG.error("Error in handling event type " + event.getType()
+ + " for application " + appID, t);
+ }
+ }
+ }
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ Configuration conf = new Configuration();
+ Dispatcher rmDispatcher = new AsyncDispatcher();
+
+ ContainerAllocationExpirer containerAllocationExpirer = mock(ContainerAllocationExpirer.class);
+ AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class);
+ this.rmContext = new RMContextImpl(new MemStore(), rmDispatcher,
+ containerAllocationExpirer, amLivelinessMonitor);
+
+ rmDispatcher.register(RMAppAttemptEventType.class,
+ new TestApplicationAttemptEventDispatcher());
+
+ rmDispatcher.register(RMAppEventType.class,
+ new TestApplicationEventDispatcher(rmContext));
+ }
+
+ protected RMApp createNewTestApp() {
+ ApplicationId applicationId = MockApps.newAppID(appId++);
+ String user = MockApps.newUserName();
+ String name = MockApps.newAppName();
+ String queue = MockApps.newQueue();
+ Configuration conf = new YarnConfiguration();
+    // ensure max retries is set to a known value
+ conf.setInt("yarn.server.resourcemanager.application.max.retries", maxRetries);
+ ApplicationSubmissionContext submissionContext = null;
+ String clientTokenStr = "bogusstring";
+ ApplicationStore appStore = mock(ApplicationStore.class);
+ YarnScheduler scheduler = mock(YarnScheduler.class);
+ ApplicationMasterService masterService = new ApplicationMasterService(rmContext,
+ new ApplicationTokenSecretManager(), scheduler);
+
+ RMApp application = new RMAppImpl(applicationId, rmContext,
+ conf, name, user,
+ queue, submissionContext, clientTokenStr,
+ appStore, rmContext.getAMLivelinessMonitor(), scheduler,
+ masterService);
+
+ testAppStartState(applicationId, user, name, queue, application);
+ return application;
+ }
+
+ // Test expected newly created app state
+ private static void testAppStartState(ApplicationId applicationId, String user,
+ String name, String queue, RMApp application) {
+    Assert.assertTrue("application start time is not greater than 0",
+ application.getStartTime() > 0);
+ Assert.assertTrue("application start time is before currentTime",
+ application.getStartTime() <= System.currentTimeMillis());
+ Assert.assertEquals("application user is not correct",
+ user, application.getUser());
+ Assert.assertEquals("application id is not correct",
+ applicationId, application.getApplicationId());
+ Assert.assertEquals("application progress is not correct",
+ (float)0.0, application.getProgress());
+ Assert.assertEquals("application queue is not correct",
+ queue, application.getQueue());
+ Assert.assertEquals("application name is not correct",
+ name, application.getName());
+ Assert.assertEquals("application finish time is not 0 and should be",
+ 0, application.getFinishTime());
+ Assert.assertEquals("application tracking url is not correct",
+ null, application.getTrackingUrl());
+ StringBuilder diag = application.getDiagnostics();
+ Assert.assertEquals("application diagnostics is not correct",
+ 0, diag.length());
+ }
+
+  // test to make sure the start time is set when the app starts
+ private static void assertStartTimeSet(RMApp application) {
+    Assert.assertTrue("application start time is not greater than 0",
+ application.getStartTime() > 0);
+ Assert.assertTrue("application start time is before currentTime",
+ application.getStartTime() <= System.currentTimeMillis());
+ }
+
+ private static void assertAppState(RMAppState state, RMApp application) {
+    Assert.assertEquals("application state should have been " + state,
+ state, application.getState());
+ }
+
+ // test to make sure times are set when app finishes
+ private static void assertTimesAtFinish(RMApp application) {
+ assertStartTimeSet(application);
+    Assert.assertTrue("application finish time is not greater than 0",
+        (application.getFinishTime() > 0));
+    Assert.assertTrue("application finish time is not >= the start time",
+        (application.getFinishTime() >= application.getStartTime()));
+ }
+
+ private static void assertKilled(RMApp application) {
+ assertTimesAtFinish(application);
+ assertAppState(RMAppState.KILLED, application);
+ StringBuilder diag = application.getDiagnostics();
+ Assert.assertEquals("application diagnostics is not correct",
+ "Application killed by user.", diag.toString());
+ }
+
+ private static void assertFailed(RMApp application, String regex) {
+ assertTimesAtFinish(application);
+ assertAppState(RMAppState.FAILED, application);
+ StringBuilder diag = application.getDiagnostics();
+ Assert.assertTrue("application diagnostics is not correct",
+ diag.toString().matches(regex));
+ }
+
+ protected RMApp testCreateAppSubmitted() throws IOException {
+ RMApp application = createNewTestApp();
+ // NEW => SUBMITTED event RMAppEventType.START
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.START);
+ application.handle(event);
+ assertStartTimeSet(application);
+ assertAppState(RMAppState.SUBMITTED, application);
+ return application;
+ }
+
+ protected RMApp testCreateAppAccepted() throws IOException {
+ RMApp application = testCreateAppSubmitted();
+ // SUBMITTED => ACCEPTED event RMAppEventType.APP_ACCEPTED
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.APP_ACCEPTED);
+ application.handle(event);
+ assertStartTimeSet(application);
+ assertAppState(RMAppState.ACCEPTED, application);
+ return application;
+ }
+
+ protected RMApp testCreateAppRunning() throws IOException {
+ RMApp application = testCreateAppAccepted();
+ // ACCEPTED => RUNNING event RMAppEventType.ATTEMPT_REGISTERED
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_REGISTERED);
+ application.handle(event);
+ assertStartTimeSet(application);
+ assertAppState(RMAppState.RUNNING, application);
+ return application;
+ }
+
+ protected RMApp testCreateAppFinished() throws IOException {
+ RMApp application = testCreateAppRunning();
+ // RUNNING => FINISHED event RMAppEventType.ATTEMPT_FINISHED
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FINISHED);
+ application.handle(event);
+ assertAppState(RMAppState.FINISHED, application);
+ assertTimesAtFinish(application);
+ return application;
+ }
+
+ @Test
+ public void testAppSuccessPath() throws IOException {
+ LOG.info("--- START: testAppSuccessPath ---");
+ testCreateAppFinished();
+ }
+
+ @Test
+ public void testAppNewKill() throws IOException {
+ LOG.info("--- START: testAppNewKill ---");
+
+ RMApp application = createNewTestApp();
+ // NEW => KILLED event RMAppEventType.KILL
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ application.handle(event);
+ assertKilled(application);
+ }
+
+ @Test
+ public void testAppSubmittedRejected() throws IOException {
+ LOG.info("--- START: testAppSubmittedRejected ---");
+
+ RMApp application = testCreateAppSubmitted();
+ // SUBMITTED => FAILED event RMAppEventType.APP_REJECTED
+ String rejectedText = "app rejected";
+ RMAppEvent event = new RMAppRejectedEvent(application.getApplicationId(), rejectedText);
+ application.handle(event);
+ assertFailed(application, rejectedText);
+ }
+
+ @Test
+ public void testAppSubmittedKill() throws IOException {
+    LOG.info("--- START: testAppSubmittedKill ---");
+
+    RMApp application = testCreateAppSubmitted();
+ // SUBMITTED => KILLED event RMAppEventType.KILL
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ application.handle(event);
+ assertKilled(application);
+ }
+
+ @Test
+ public void testAppAcceptedFailed() throws IOException {
+ LOG.info("--- START: testAppAcceptedFailed ---");
+
+ RMApp application = testCreateAppAccepted();
+    // ACCEPTED => ACCEPTED event RMAppEventType.ATTEMPT_FAILED
+ for (int i=1; i<maxRetries; i++) {
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED);
+ application.handle(event);
+ assertAppState(RMAppState.ACCEPTED, application);
+ }
+
+    // ACCEPTED => FAILED event RMAppEventType.ATTEMPT_FAILED after max retries
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED);
+ application.handle(event);
+ assertFailed(application, ".*Failing the application.*");
+ }
+
+ @Test
+ public void testAppAcceptedKill() throws IOException {
+ LOG.info("--- START: testAppAcceptedKill ---");
+
+ RMApp application = testCreateAppAccepted();
+ // ACCEPTED => KILLED event RMAppEventType.KILL
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ application.handle(event);
+ assertKilled(application);
+ }
+
+ @Test
+ public void testAppRunningKill() throws IOException {
+ LOG.info("--- START: testAppRunningKill ---");
+
+ RMApp application = testCreateAppRunning();
+ // RUNNING => KILLED event RMAppEventType.KILL
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ application.handle(event);
+ assertKilled(application);
+ }
+
+ @Test
+ public void testAppRunningFailed() throws IOException {
+ LOG.info("--- START: testAppRunningFailed ---");
+
+ RMApp application = testCreateAppRunning();
+ // RUNNING => FAILED/RESTARTING event RMAppEventType.ATTEMPT_FAILED
+ for (int i=1; i<maxRetries; i++) {
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED);
+ application.handle(event);
+ assertAppState(RMAppState.RUNNING, application);
+ }
+
+ // RUNNING => FAILED/RESTARTING event RMAppEventType.ATTEMPT_FAILED after max retries
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED);
+ application.handle(event);
+ assertFailed(application, ".*Failing the application.*");
+
+ // FAILED => FAILED event RMAppEventType.KILL
+ event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ application.handle(event);
+ assertFailed(application, ".*Failing the application.*");
+ }
+
+
+ @Test
+ public void testAppFinishedFinished() throws IOException {
+ LOG.info("--- START: testAppFinishedFinished ---");
+
+ RMApp application = testCreateAppFinished();
+ // FINISHED => FINISHED event RMAppEventType.KILL
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ application.handle(event);
+ assertTimesAtFinish(application);
+ assertAppState(RMAppState.FINISHED, application);
+ StringBuilder diag = application.getDiagnostics();
+ Assert.assertEquals("application diagnostics is not correct",
+ "", diag.toString());
+ }
+
+ @Test
+ public void testAppKilledKilled() throws IOException {
+ LOG.info("--- START: testAppKilledKilled ---");
+
+ RMApp application = testCreateAppRunning();
+
+ // RUNNING => KILLED event RMAppEventType.KILL
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ application.handle(event);
+ assertTimesAtFinish(application);
+ assertAppState(RMAppState.KILLED, application);
+
+ // KILLED => KILLED event RMAppEventType.ATTEMPT_FINISHED
+ event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FINISHED);
+ application.handle(event);
+ assertTimesAtFinish(application);
+ assertAppState(RMAppState.KILLED, application);
+
+ // KILLED => KILLED event RMAppEventType.ATTEMPT_FAILED
+ event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED);
+ application.handle(event);
+ assertTimesAtFinish(application);
+ assertAppState(RMAppState.KILLED, application);
+
+ // KILLED => KILLED event RMAppEventType.ATTEMPT_KILLED
+ event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_KILLED);
+ application.handle(event);
+ assertTimesAtFinish(application);
+ assertAppState(RMAppState.KILLED, application);
+
+ // KILLED => KILLED event RMAppEventType.KILL
+ event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ application.handle(event);
+ assertTimesAtFinish(application);
+ assertAppState(RMAppState.KILLED, application);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
new file mode 100644
index 0000000..5c77429
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
@@ -0,0 +1,198 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import static org.apache.hadoop.test.MetricsAsserts.*;
+import static org.apache.hadoop.test.MockitoMaker.*;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+public class TestQueueMetrics {
+  static final int GB = 1024; // 1 GB expressed in MB
+
+ final MetricsSystem ms = new MetricsSystemImpl();
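+  // Each test registers queue (and optionally per-user) metrics sources on
+  // this private metrics system and reads them back via queueSource() and
+  // userSource() below.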
+
+ @Test public void testDefaultSingleQueueMetrics() {
+ String queueName = "single";
+ String user = "alice";
+
+ QueueMetrics metrics = QueueMetrics.forQueue(ms, queueName, null, false);
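+    // the last argument (presumably an enable-user-metrics flag) is false,
+    // so no per-user source gets registered and userSource below stays null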
+    MetricsSource queueSource = queueSource(ms, queueName);
+ AppSchedulingInfo app = mockApp(user);
+
+ metrics.submitApp(user);
+ MetricsSource userSource = userSource(ms, queueName, user);
+ checkApps(queueSource, 1, 1, 0, 0, 0, 0);
+
+ metrics.setAvailableResourcesToQueue(Resource.createResource(100*GB));
+ metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
+ // Available resources is set externally, as it depends on dynamic
+ // configurable cluster/queue resources
+ checkResources(queueSource, 0, 0, 100, 15, 5, 0, 0);
+
+ metrics.incrAppsRunning(user);
+ checkApps(queueSource, 1, 0, 1, 0, 0, 0);
+
+ metrics.allocateResources(user, 3, Resources.createResource(2*GB));
+ checkResources(queueSource, 6, 3, 100, 9, 2, 0, 0);
+
+ metrics.releaseResources(user, 1, Resources.createResource(2*GB));
+ checkResources(queueSource, 4, 2, 100, 9, 2, 0, 0);
+
+ metrics.finishApp(app, RMAppAttemptState.FINISHED);
+ checkApps(queueSource, 1, 0, 0, 1, 0, 0);
+ assertNull(userSource);
+ }
+
+ @Test public void testSingleQueueWithUserMetrics() {
+ String queueName = "single2";
+ String user = "dodo";
+
+ QueueMetrics metrics = QueueMetrics.forQueue(ms, queueName, null, true);
+ MetricsSource queueSource = queueSource(ms, queueName);
+ AppSchedulingInfo app = mockApp(user);
+
+ metrics.submitApp(user);
+ MetricsSource userSource = userSource(ms, queueName, user);
+
+ checkApps(queueSource, 1, 1, 0, 0, 0, 0);
+ checkApps(userSource, 1, 1, 0, 0, 0, 0);
+
+ metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB));
+ metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB));
+ metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
+ // Available resources is set externally, as it depends on dynamic
+ // configurable cluster/queue resources
+ checkResources(queueSource, 0, 0, 100, 15, 5, 0, 0);
+ checkResources(userSource, 0, 0, 10, 15, 5, 0, 0);
+
+ metrics.incrAppsRunning(user);
+ checkApps(queueSource, 1, 0, 1, 0, 0, 0);
+ checkApps(userSource, 1, 0, 1, 0, 0, 0);
+
+ metrics.allocateResources(user, 3, Resources.createResource(2*GB));
+ checkResources(queueSource, 6, 3, 100, 9, 2, 0, 0);
+ checkResources(userSource, 6, 3, 10, 9, 2, 0, 0);
+
+ metrics.releaseResources(user, 1, Resources.createResource(2*GB));
+ checkResources(queueSource, 4, 2, 100, 9, 2, 0, 0);
+ checkResources(userSource, 4, 2, 10, 9, 2, 0, 0);
+
+ metrics.finishApp(app, RMAppAttemptState.FINISHED);
+ checkApps(queueSource, 1, 0, 0, 1, 0, 0);
+ checkApps(userSource, 1, 0, 0, 1, 0, 0);
+ }
+
+ @Test public void testTwoLevelWithUserMetrics() {
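+    // Operations on the leaf queue should roll up into the parent queue's
+    // metrics and into the per-user sources of both queues.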
+ String parentQueueName = "root";
+ String leafQueueName = "root.leaf";
+ String user = "alice";
+
+ QueueMetrics parentMetrics =
+ QueueMetrics.forQueue(ms, parentQueueName, null, true);
+ Queue parentQueue = make(stub(Queue.class).returning(parentMetrics).
+ from.getMetrics());
+ QueueMetrics metrics =
+ QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true);
+ MetricsSource parentQueueSource = queueSource(ms, parentQueueName);
+ MetricsSource queueSource = queueSource(ms, leafQueueName);
+ AppSchedulingInfo app = mockApp(user);
+
+ metrics.submitApp(user);
+ MetricsSource userSource = userSource(ms, leafQueueName, user);
+ MetricsSource parentUserSource = userSource(ms, parentQueueName, user);
+
+ checkApps(queueSource, 1, 1, 0, 0, 0, 0);
+ checkApps(parentQueueSource, 1, 1, 0, 0, 0, 0);
+ checkApps(userSource, 1, 1, 0, 0, 0, 0);
+ checkApps(parentUserSource, 1, 1, 0, 0, 0, 0);
+
+ parentMetrics.setAvailableResourcesToQueue(Resources.createResource(100*GB));
+ metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB));
+ parentMetrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB));
+ metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB));
+ metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
+ checkResources(queueSource, 0, 0, 100, 15, 5, 0, 0);
+ checkResources(parentQueueSource, 0, 0, 100, 15, 5, 0, 0);
+ checkResources(userSource, 0, 0, 10, 15, 5, 0, 0);
+ checkResources(parentUserSource, 0, 0, 10, 15, 5, 0, 0);
+
+ metrics.incrAppsRunning(user);
+ checkApps(queueSource, 1, 0, 1, 0, 0, 0);
+ checkApps(userSource, 1, 0, 1, 0, 0, 0);
+
+ metrics.allocateResources(user, 3, Resources.createResource(2*GB));
+ metrics.reserveResource(user, Resources.createResource(3*GB));
+ // Available resources is set externally, as it depends on dynamic
+ // configurable cluster/queue resources
+ checkResources(queueSource, 6, 3, 100, 9, 2, 3, 1);
+ checkResources(parentQueueSource, 6, 3, 100, 9, 2, 3, 1);
+ checkResources(userSource, 6, 3, 10, 9, 2, 3, 1);
+ checkResources(parentUserSource, 6, 3, 10, 9, 2, 3, 1);
+
+ metrics.releaseResources(user, 1, Resources.createResource(2*GB));
+ metrics.unreserveResource(user, Resources.createResource(3*GB));
+ checkResources(queueSource, 4, 2, 100, 9, 2, 0, 0);
+ checkResources(parentQueueSource, 4, 2, 100, 9, 2, 0, 0);
+ checkResources(userSource, 4, 2, 10, 9, 2, 0, 0);
+ checkResources(parentUserSource, 4, 2, 10, 9, 2, 0, 0);
+
+ metrics.finishApp(app, RMAppAttemptState.FINISHED);
+ checkApps(queueSource, 1, 0, 0, 1, 0, 0);
+ checkApps(parentQueueSource, 1, 0, 0, 1, 0, 0);
+ checkApps(userSource, 1, 0, 0, 1, 0, 0);
+ checkApps(parentUserSource, 1, 0, 0, 1, 0, 0);
+ }
+
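+ // Assertion helpers: each checks the full set of app/resource metrics on
+ // a source; the positional arguments mirror the asserts below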
+ public static void checkApps(MetricsSource source, int submitted, int pending,
+ int running, int completed, int failed, int killed) {
+ MetricsRecordBuilder rb = getMetrics(source);
+ assertCounter("AppsSubmitted", submitted, rb);
+ assertGauge("AppsPending", pending, rb);
+ assertGauge("AppsRunning", running, rb);
+ assertCounter("AppsCompleted", completed, rb);
+ assertCounter("AppsFailed", failed, rb);
+ assertCounter("AppsKilled", killed, rb);
+ }
+
+ public static void checkResources(MetricsSource source, int allocGB,
+ int allocCtnrs, int availGB, int pendingGB, int pendingCtnrs,
+ int reservedGB, int reservedCtnrs) {
+ MetricsRecordBuilder rb = getMetrics(source);
+ assertGauge("AllocatedGB", allocGB, rb);
+ assertGauge("AllocatedContainers", allocCtnrs, rb);
+ assertGauge("AvailableGB", availGB, rb);
+ assertGauge("PendingGB", pendingGB, rb);
+ assertGauge("PendingContainers", pendingCtnrs, rb);
+ assertGauge("ReservedGB", reservedGB, rb);
+ assertGauge("ReservedContainers", reservedCtnrs, rb);
+ }
+
+ private static AppSchedulingInfo mockApp(String user) {
+ AppSchedulingInfo app = mock(AppSchedulingInfo.class);
+ when(app.getUser()).thenReturn(user);
+ return app;
+ }
+
+ public static MetricsSource queueSource(MetricsSystem ms, String queue) {
+ MetricsSource s = ms.getSource(QueueMetrics.sourceName(queue).toString());
+ return s;
+ }
+
+ public static MetricsSource userSource(MetricsSystem ms, String queue,
+ String user) {
+ MetricsSource s = ms.getSource(QueueMetrics.sourceName(queue).
+ append(",user=").append(user).toString());
+ return s;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
new file mode 100644
index 0000000..1f4a19b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -0,0 +1,243 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+
+import java.io.IOException;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.server.resourcemanager.Application;
+import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.Task;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestCapacityScheduler {
+ private static final Log LOG = LogFactory.getLog(TestCapacityScheduler.class);
+
+ private ResourceManager resourceManager = null;
+
+ @Before
+ public void setUp() throws Exception {
+ Store store = StoreFactory.getStore(new Configuration());
+ resourceManager = new ResourceManager(store);
+ CapacitySchedulerConfiguration csConf =
+ new CapacitySchedulerConfiguration();
+ csConf.setClass(RMConfig.RESOURCE_SCHEDULER,
+ CapacityScheduler.class, ResourceScheduler.class);
+ setupQueueConfiguration(csConf);
+ resourceManager.init(csConf);
+ ((AsyncDispatcher)resourceManager.getRMContext().getDispatcher()).start();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
+ registerNode(String hostName, int containerManagerPort, int httpPort,
+ String rackName, int memory)
+ throws IOException {
+ return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
+ hostName, containerManagerPort, httpPort, rackName, memory,
+ resourceManager.getResourceTrackerService(), resourceManager
+ .getRMContext());
+ }
+
+// @Test
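+ // (@Test above is commented out, so this test is disabled in this
+ // snapshot.)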
+ public void testCapacityScheduler() throws Exception {
+
+ LOG.info("--- START: testCapacityScheduler ---");
+
+ final int GB = 1024;
+
+ // Register node1
+ String host_0 = "host_0";
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 =
+ registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 4 * GB);
+ nm_0.heartbeat();
+
+ // Register node2
+ String host_1 = "host_1";
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 =
+ registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 2 * GB);
+ nm_1.heartbeat();
+
+ // ResourceRequest priorities
+ Priority priority_0 =
+ org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(0);
+ Priority priority_1 =
+ org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(1);
+
+ // Submit an application
+ Application application_0 = new Application("user_0", "a1", resourceManager);
+ application_0.submit();
+
+ application_0.addNodeManager(host_0, 1234, nm_0);
+ application_0.addNodeManager(host_1, 1234, nm_1);
+
+ Resource capability_0_0 = Resources.createResource(1 * GB);
+ application_0.addResourceRequestSpec(priority_1, capability_0_0);
+
+ Resource capability_0_1 = Resources.createResource(2 * GB);
+ application_0.addResourceRequestSpec(priority_0, capability_0_1);
+
+ Task task_0_0 = new Task(application_0, priority_1,
+ new String[] {host_0, host_1});
+ application_0.addTask(task_0_0);
+
+ // Submit another application
+ Application application_1 = new Application("user_1", "b2", resourceManager);
+ application_1.submit();
+
+ application_1.addNodeManager(host_0, 1234, nm_0);
+ application_1.addNodeManager(host_1, 1234, nm_1);
+
+ Resource capability_1_0 = Resources.createResource(3 * GB);
+ application_1.addResourceRequestSpec(priority_1, capability_1_0);
+
+ Resource capability_1_1 = Resources.createResource(2 * GB);
+ application_1.addResourceRequestSpec(priority_0, capability_1_1);
+
+ Task task_1_0 = new Task(application_1, priority_1,
+ new String[] {host_0, host_1});
+ application_1.addTask(task_1_0);
+
+ // Send resource requests to the scheduler
+ application_0.schedule();
+ application_1.schedule();
+
+ // Send a heartbeat to kick the tires on the Scheduler
+ LOG.info("Kick!");
+ nm_0.heartbeat(); // task_0_0 and task_1_0 allocated, used=4G
+ nm_1.heartbeat(); // nothing allocated
+
+ // Get allocations from the scheduler
+ application_0.schedule(); // task_0_0
+ checkApplicationResourceUsage(1 * GB, application_0);
+
+ application_1.schedule(); // task_1_0
+ checkApplicationResourceUsage(3 * GB, application_1);
+
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+
+ checkNodeResourceUsage(4*GB, nm_0); // task_0_0 (1G) and task_1_0 (3G)
+ checkNodeResourceUsage(0*GB, nm_1); // no tasks, 2G available
+
+ LOG.info("Adding new tasks...");
+
+ Task task_1_1 = new Task(application_1, priority_0,
+ new String[] {RMNode.ANY});
+ application_1.addTask(task_1_1);
+
+ application_1.schedule();
+
+ Task task_0_1 = new Task(application_0, priority_0,
+ new String[] {host_0, host_1});
+ application_0.addTask(task_0_1);
+
+ application_0.schedule();
+
+ // Send a heartbeat to kick the tires on the Scheduler
+ LOG.info("Sending hb from " + nm_0.getHostName());
+ nm_0.heartbeat(); // nothing new, used=4G
+
+ LOG.info("Sending hb from " + nm_1.getHostName());
+ nm_1.heartbeat(); // task_1_1, used=2G
+
+ // Get allocations from the scheduler
+ LOG.info("Trying to allocate...");
+ application_0.schedule();
+ checkApplicationResourceUsage(1 * GB, application_0);
+
+ application_1.schedule();
+ checkApplicationResourceUsage(5 * GB, application_1);
+
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+ checkNodeResourceUsage(4*GB, nm_0);
+ checkNodeResourceUsage(2*GB, nm_1);
+
+ LOG.info("--- END: testCapacityScheduler ---");
+ }
+
+ private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
+
+ // Define top-level queues
+ conf.setQueues(CapacityScheduler.ROOT, new String[] {"a", "b"});
+ conf.setCapacity(CapacityScheduler.ROOT, 100);
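+ // Sibling capacities at each level below sum to 100, as the capacity
+ // scheduler requires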
+
+ final String A = CapacityScheduler.ROOT + ".a";
+ conf.setCapacity(A, 10);
+
+ final String B = CapacityScheduler.ROOT + ".b";
+ conf.setCapacity(B, 90);
+
+ // Define 2nd-level queues
+ final String A1 = A + ".a1";
+ final String A2 = A + ".a2";
+ conf.setQueues(A, new String[] {"a1", "a2"});
+ conf.setCapacity(A1, 30);
+ conf.setUserLimitFactor(A1, 100.0f);
+ conf.setCapacity(A2, 70);
+ conf.setUserLimitFactor(A2, 100.0f);
+
+ final String B1 = B + ".b1";
+ final String B2 = B + ".b2";
+ final String B3 = B + ".b3";
+ conf.setQueues(B, new String[] {"b1", "b2", "b3"});
+ conf.setCapacity(B1, 50);
+ conf.setUserLimitFactor(B1, 100.0f);
+ conf.setCapacity(B2, 30);
+ conf.setUserLimitFactor(B2, 100.0f);
+ conf.setCapacity(B3, 20);
+ conf.setUserLimitFactor(B3, 100.0f);
+
+ LOG.info("Setup top-level queues a and b");
+ }
+
+ private void checkApplicationResourceUsage(int expected,
+ Application application) {
+ Assert.assertEquals(expected, application.getUsedResources().getMemory());
+ }
+
+ private void checkNodeResourceUsage(int expected,
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) {
+ Assert.assertEquals(expected, node.getUsed().getMemory());
+ node.checkResourceUsage();
+ }
+
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
new file mode 100644
index 0000000..fa80f2b
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -0,0 +1,743 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+public class TestLeafQueue {
+ private static final Log LOG = LogFactory.getLog(TestLeafQueue.class);
+
+ private final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ RMContext rmContext;
+ CapacityScheduler cs;
+ CapacitySchedulerConfiguration csConf;
+ CapacitySchedulerContext csContext;
+
+ Queue root;
+ Map<String, Queue> queues = new HashMap<String, Queue>();
+
+ final static int GB = 1024;
+ final static String DEFAULT_RACK = "/default";
+
+ @Before
+ public void setUp() throws Exception {
+ cs = new CapacityScheduler();
+ rmContext = TestUtils.getMockRMContext();
+
+ csConf =
+ new CapacitySchedulerConfiguration();
+ setupQueueConfiguration(csConf);
+
+
+ csContext = mock(CapacitySchedulerContext.class);
+ when(csContext.getConfiguration()).thenReturn(csConf);
+ when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
+ when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16*GB));
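+ // With a 1GB minimum allocation, queue 'a's 10% share of an 8GB node
+ // (0.8GB) rounds up to one full 1GB container in the tests below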
+ root =
+ CapacityScheduler.parseQueue(csContext, csConf, null, "root",
+ queues, queues,
+ CapacityScheduler.queueComparator,
+ CapacityScheduler.applicationComparator,
+ TestUtils.spyHook);
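+ // (TestUtils.spyHook wraps each parsed queue in a Mockito spy, which is
+ // what allows stubLeafQueue() to stub methods on real queue objects.)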
+
+ cs.reinitialize(csConf, null, rmContext);
+ }
+
+ private static final String A = "a";
+ private static final String B = "b";
+ private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
+
+ // Define top-level queues
+ conf.setQueues(CapacityScheduler.ROOT, new String[] {A, B});
+ conf.setCapacity(CapacityScheduler.ROOT, 100);
+
+ final String Q_A = CapacityScheduler.ROOT + "." + A;
+ conf.setCapacity(Q_A, 10);
+
+ final String Q_B = CapacityScheduler.ROOT + "." + B;
+ conf.setCapacity(Q_B, 90);
+
+ LOG.info("Setup top-level queues a and b");
+ }
+
+ private LeafQueue stubLeafQueue(LeafQueue queue) {
+
+ // Mock some methods for ease in these unit tests
+
+ // 1. LeafQueue.createContainer to return dummy containers
+ doAnswer(
+ new Answer<Container>() {
+ @Override
+ public Container answer(InvocationOnMock invocation)
+ throws Throwable {
+ final SchedulerApp application =
+ (SchedulerApp)(invocation.getArguments()[0]);
+ final ContainerId containerId =
+ TestUtils.getMockContainerId(application);
+
+ Container container = TestUtils.getMockContainer(
+ containerId,
+ ((SchedulerNode)(invocation.getArguments()[1])).getNodeID(),
+ (Resource)(invocation.getArguments()[2]));
+ return container;
+ }
+ }
+ ).
+ when(queue).createContainer(
+ any(SchedulerApp.class),
+ any(SchedulerNode.class),
+ any(Resource.class));
+
+ // 2. Stub out LeafQueue.parent.completedContainer
+ Queue parent = queue.getParent();
+ doNothing().when(parent).completedContainer(
+ any(Resource.class), any(SchedulerApp.class), any(SchedulerNode.class),
+ any(RMContainer.class), any(RMContainerEventType.class));
+
+ return queue;
+ }
+
+ @Test
+ public void testSingleQueueWithOneUser() throws Exception {
+
+ // Manipulate queue 'a'
+ LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
+
+ // Users
+ final String user_0 = "user_0";
+
+ // Submit applications
+ final ApplicationAttemptId appAttemptId_0 =
+ TestUtils.getMockApplicationAttemptId(0, 0);
+ SchedulerApp app_0 =
+ new SchedulerApp(appAttemptId_0, user_0, a, rmContext, null);
+ a.submitApplication(app_0, user_0, A);
+
+ final ApplicationAttemptId appAttemptId_1 =
+ TestUtils.getMockApplicationAttemptId(1, 0);
+ SchedulerApp app_1 =
+ new SchedulerApp(appAttemptId_1, user_0, a, rmContext, null);
+ a.submitApplication(app_1, user_0, A); // same user
+
+ // Setup some nodes
+ String host_0 = "host_0";
+ SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
+
+ final int numNodes = 1;
+ Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+ when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+ // Setup resource-requests
+ Priority priority = TestUtils.createMockPriority(1);
+ app_0.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 3, priority,
+ recordFactory)));
+
+ app_1.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 2, priority,
+ recordFactory)));
+
+ // Start testing...
+
+ // Only 1 container
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(1*GB, a.getUsedResources().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // The 2nd container also fits: minCapacity = 1024 since the queue's 10%
+ // share of 8GB (0.8GB) is below minAlloc, and a user may get one
+ // container beyond the user-limit
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(2*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Can't allocate 3rd due to user-limit
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(2*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Bump up user-limit-factor, now allocate should work
+ a.setUserLimitFactor(10);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(3*GB, a.getUsedResources().getMemory());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // One more should work, for app_1, due to user-limit-factor
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(4*GB, a.getUsedResources().getMemory());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Test max-capacity
+ // Now - no more allocs since we are at max-cap
+ a.setMaxCapacity(0.5f);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(4*GB, a.getUsedResources().getMemory());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Release each container from app_0
+ for (RMContainer rmContainer : app_0.getLiveContainers()) {
+ a.completedContainer(clusterResource, app_0, node_0, rmContainer,
+ RMContainerEventType.KILL);
+ }
+ assertEquals(1*GB, a.getUsedResources().getMemory());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Release each container from app_1
+ for (RMContainer rmContainer : app_1.getLiveContainers()) {
+ a.completedContainer(clusterResource, app_1, node_0, rmContainer,
+ RMContainerEventType.KILL);
+ }
+ assertEquals(0*GB, a.getUsedResources().getMemory());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ }
+
+ @Test
+ public void testSingleQueueWithMultipleUsers() throws Exception {
+
+ // Mock the queue
+ LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
+
+ // Users
+ final String user_0 = "user_0";
+ final String user_1 = "user_1";
+ final String user_2 = "user_2";
+
+ // Submit applications
+ final ApplicationAttemptId appAttemptId_0 =
+ TestUtils.getMockApplicationAttemptId(0, 0);
+ SchedulerApp app_0 =
+ new SchedulerApp(appAttemptId_0, user_0, a, rmContext, null);
+ a.submitApplication(app_0, user_0, A);
+
+ final ApplicationAttemptId appAttemptId_1 =
+ TestUtils.getMockApplicationAttemptId(1, 0);
+ SchedulerApp app_1 =
+ new SchedulerApp(appAttemptId_1, user_0, a, rmContext, null);
+ a.submitApplication(app_1, user_0, A); // same user
+
+ // Setup some nodes
+ String host_0 = "host_0";
+ SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
+
+ final int numNodes = 1;
+ Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+ when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+ // Setup resource-requests
+ Priority priority = TestUtils.createMockPriority(1);
+ app_0.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 10, priority,
+ recordFactory)));
+
+ app_1.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 10, priority,
+ recordFactory)));
+
+ // Start testing...
+
+ // Only 1 container
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(1*GB, a.getUsedResources().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // The 2nd container also fits: minCapacity = 1024 since the queue's 10%
+ // share of 8GB (0.8GB) is below minAlloc, and a user may get one
+ // container beyond the user-limit
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(2*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Can't allocate 3rd due to user-limit
+ a.setUserLimit(25);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(2*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Submit more apps
+ final ApplicationAttemptId appAttemptId_2 =
+ TestUtils.getMockApplicationAttemptId(2, 0);
+ SchedulerApp app_2 =
+ new SchedulerApp(appAttemptId_2, user_1, a, rmContext, null);
+ a.submitApplication(app_2, user_1, A);
+
+ final ApplicationAttemptId appAttemptId_3 =
+ TestUtils.getMockApplicationAttemptId(3, 0);
+ SchedulerApp app_3 =
+ new SchedulerApp(appAttemptId_3, user_2, a, rmContext, null);
+ a.submitApplication(app_3, user_2, A);
+
+ app_2.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 3*GB, 1, priority,
+ recordFactory)));
+
+ app_3.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 2, priority,
+ recordFactory)));
+
+ // Now allocations should go to app_2, since user_0 is at its user-limit
+ // in spite of the high user-limit-factor
+ a.setUserLimitFactor(10);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(5*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+
+ // Now allocations should go to app_0, since user_0 is at its user-limit,
+ // not above it
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(6*GB, a.getUsedResources().getMemory());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+
+ // Test max-capacity
+ // Now - no more allocs since we are at max-cap
+ a.setMaxCapacity(0.5f);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(6*GB, a.getUsedResources().getMemory());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+
+ // Revert max-capacity and user-limit-factor
+ // Now allocations should go to app_3, since it's under its user-limit
+ a.setMaxCapacity(-1);
+ a.setUserLimitFactor(1);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(7*GB, a.getUsedResources().getMemory());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_3.getCurrentConsumption().getMemory());
+
+ // Now we should assign to app_3 again since user_2 is under user-limit
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(8*GB, a.getUsedResources().getMemory());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, app_3.getCurrentConsumption().getMemory());
+
+ // Release each container from app_0
+ for (RMContainer rmContainer : app_0.getLiveContainers()) {
+ a.completedContainer(clusterResource, app_0, node_0, rmContainer,
+ RMContainerEventType.KILL);
+ }
+ assertEquals(5*GB, a.getUsedResources().getMemory());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, app_3.getCurrentConsumption().getMemory());
+
+ // Release each container from app_2
+ for (RMContainer rmContainer : app_2.getLiveContainers()) {
+ a.completedContainer(clusterResource, app_2, node_0, rmContainer,
+ RMContainerEventType.KILL);
+ }
+ assertEquals(2*GB, a.getUsedResources().getMemory());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_2.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, app_3.getCurrentConsumption().getMemory());
+
+ // Release each container from app_3
+ for (RMContainer rmContainer : app_3.getLiveContainers()) {
+ a.completedContainer(clusterResource, app_3, node_0, rmContainer,
+ RMContainerEventType.KILL);
+ }
+ assertEquals(0*GB, a.getUsedResources().getMemory());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_2.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ }
+
+ @Test
+ public void testReservation() throws Exception {
+
+ // Manipulate queue 'a'
+ LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
+
+ // Users
+ final String user_0 = "user_0";
+ final String user_1 = "user_1";
+
+ // Submit applications
+ final ApplicationAttemptId appAttemptId_0 =
+ TestUtils.getMockApplicationAttemptId(0, 0);
+ SchedulerApp app_0 =
+ new SchedulerApp(appAttemptId_0, user_0, a, rmContext, null);
+ a.submitApplication(app_0, user_0, A);
+
+ final ApplicationAttemptId appAttemptId_1 =
+ TestUtils.getMockApplicationAttemptId(1, 0);
+ SchedulerApp app_1 =
+ new SchedulerApp(appAttemptId_1, user_1, a, rmContext, null);
+ a.submitApplication(app_1, user_1, A);
+
+ // Setup some nodes
+ String host_0 = "host_0";
+ SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB);
+
+ final int numNodes = 1;
+ Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+ when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+ // Setup resource-requests
+ Priority priority = TestUtils.createMockPriority(1);
+ app_0.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 2, priority,
+ recordFactory)));
+
+ app_1.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 4*GB, 1, priority,
+ recordFactory)));
+
+ // Start testing...
+
+ // Only 1 container
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(1*GB, a.getUsedResources().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // The 2nd container also fits: minCapacity = 1024 since the queue's 10%
+ // share of 8GB (0.8GB) is below minAlloc, and a user may get one
+ // container beyond the user-limit
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(2*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Now, reservation should kick in for app_1
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(6*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
+ assertEquals(2*GB, node_0.getUsedResource().getMemory());
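+ // Note: the queue's used-resources (6GB) includes the 4GB reservation,
+ // while the node itself still has only 2GB actually allocated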
+
+ // Now free 1 container from app_0 i.e. 1G
+ a.completedContainer(clusterResource, app_0, node_0,
+ app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(5*GB, a.getUsedResources().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
+ assertEquals(1*GB, node_0.getUsedResource().getMemory());
+
+ // Now finish another container from app_0 and fulfill the reservation
+ a.completedContainer(clusterResource, app_0, node_0,
+ app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(4*GB, a.getUsedResources().getMemory());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentReservation().getMemory());
+ assertEquals(4*GB, node_0.getUsedResource().getMemory());
+ }
+
+
+ @Test
+ public void testLocalityScheduling() throws Exception {
+
+ // Manipulate queue 'a'
+ LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
+
+ // User
+ String user_0 = "user_0";
+
+ // Submit applications
+ final ApplicationAttemptId appAttemptId_0 =
+ TestUtils.getMockApplicationAttemptId(0, 0);
+ SchedulerApp app_0 =
+ spy(new SchedulerApp(appAttemptId_0, user_0, a, rmContext, null));
+ a.submitApplication(app_0, user_0, A);
+
+ // Setup some nodes and racks
+ String host_0 = "host_0";
+ String rack_0 = "rack_0";
+ SchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8*GB);
+
+ String host_1 = "host_1";
+ String rack_1 = "rack_1";
+ SchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8*GB);
+
+ String host_2 = "host_2";
+ String rack_2 = "rack_2";
+ SchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8*GB);
+
+ final int numNodes = 3;
+ Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+ when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+ // Setup resource-requests and submit
+ Priority priority = TestUtils.createMockPriority(1);
+ List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(host_0, 1*GB, 1,
+ priority, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(rack_0, 1*GB, 1,
+ priority, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(host_1, 1*GB, 1,
+ priority, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(rack_1, 1*GB, 1,
+ priority, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 3, // one extra
+ priority, recordFactory));
+ app_0.updateResourceRequests(app_0_requests_0);
+
+ // Start testing...
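+ // Delay scheduling: off-switch allocation is deferred until the app has
+ // missed as many scheduling opportunities as it still has required
+ // containers (3 here)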
+
+ // Start with off switch, shouldn't allocate due to delay scheduling
+ a.assignContainers(clusterResource, node_2);
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_2),
+ any(Priority.class), any(ResourceRequest.class), any(Container.class));
+ assertEquals(1, app_0.getSchedulingOpportunities(priority));
+ assertEquals(3, app_0.getTotalRequiredResources(priority));
+
+ // Another off switch, shouldn't allocate due to delay scheduling
+ a.assignContainers(clusterResource, node_2);
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_2),
+ any(Priority.class), any(ResourceRequest.class), any(Container.class));
+ assertEquals(2, app_0.getSchedulingOpportunities(priority));
+ assertEquals(3, app_0.getTotalRequiredResources(priority));
+
+ // Another off switch, shouldn't allocate due to delay scheduling
+ a.assignContainers(clusterResource, node_2);
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_2),
+ any(Priority.class), any(ResourceRequest.class), any(Container.class));
+ assertEquals(3, app_0.getSchedulingOpportunities(priority));
+ assertEquals(3, app_0.getTotalRequiredResources(priority));
+
+ // Another off switch, now we should allocate
+ // since missedOpportunities=3 and reqdContainers=3
+ a.assignContainers(clusterResource, node_2);
+ verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_2),
+ any(Priority.class), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset
+ assertEquals(2, app_0.getTotalRequiredResources(priority));
+
+ // NODE_LOCAL - node_0
+ a.assignContainers(clusterResource, node_0);
+ verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0),
+ any(Priority.class), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset
+ assertEquals(1, app_0.getTotalRequiredResources(priority));
+
+ // NODE_LOCAL - node_1
+ a.assignContainers(clusterResource, node_1);
+ verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_1),
+ any(Priority.class), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset
+ assertEquals(0, app_0.getTotalRequiredResources(priority));
+
+ // Add 1 more request to check for RACK_LOCAL
+ app_0_requests_0.clear();
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(host_1, 1*GB, 1,
+ priority, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(rack_1, 1*GB, 1,
+ priority, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 1, // one extra
+ priority, recordFactory));
+ app_0.updateResourceRequests(app_0_requests_0);
+ assertEquals(1, app_0.getTotalRequiredResources(priority));
+
+ String host_3 = "host_3"; // on rack_1
+ SchedulerNode node_3 = TestUtils.getMockNode(host_3, rack_1, 0, 8*GB);
+
+ a.assignContainers(clusterResource, node_3);
+ verify(app_0).allocate(eq(NodeType.RACK_LOCAL), eq(node_3),
+ any(Priority.class), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset
+ assertEquals(0, app_0.getTotalRequiredResources(priority));
+ }
+
+ @Test
+ public void testApplicationPriorityScheduling() throws Exception {
+ // Manipulate queue 'a'
+ LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
+
+ // User
+ String user_0 = "user_0";
+
+ // Submit applications
+ final ApplicationAttemptId appAttemptId_0 =
+ TestUtils.getMockApplicationAttemptId(0, 0);
+ SchedulerApp app_0 =
+ spy(new SchedulerApp(appAttemptId_0, user_0, a, rmContext, null));
+ a.submitApplication(app_0, user_0, A);
+
+ // Setup some nodes and racks
+ String host_0 = "host_0";
+ String rack_0 = "rack_0";
+ SchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8*GB);
+
+ String host_1 = "host_1";
+ String rack_1 = "rack_1";
+ SchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8*GB);
+
+ String host_2 = "host_2";
+ String rack_2 = "rack_2";
+ SchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8*GB);
+
+ final int numNodes = 3;
+ Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+ when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+ // Setup resource-requests and submit
+ List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
+
+ // P1
+ Priority priority_1 = TestUtils.createMockPriority(1);
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(host_0, 1*GB, 1,
+ priority_1, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(rack_0, 1*GB, 1,
+ priority_1, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(host_1, 1*GB, 1,
+ priority_1, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(rack_1, 1*GB, 1,
+ priority_1, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 2,
+ priority_1, recordFactory));
+
+ // P2
+ Priority priority_2 = TestUtils.createMockPriority(2);
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(host_2, 2*GB, 1,
+ priority_2, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(rack_2, 2*GB, 1,
+ priority_2, recordFactory));
+ app_0_requests_0.add(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 2*GB, 1,
+ priority_2, recordFactory));
+
+ app_0.updateResourceRequests(app_0_requests_0);
+
+ // Start testing...
+
+ // Start with off switch, shouldn't allocate P1 due to delay scheduling
+ // thus, no P2 either!
+ a.assignContainers(clusterResource, node_2);
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_2),
+ eq(priority_1), any(ResourceRequest.class), any(Container.class));
+ assertEquals(1, app_0.getSchedulingOpportunities(priority_1));
+ assertEquals(2, app_0.getTotalRequiredResources(priority_1));
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_2),
+ eq(priority_2), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority_2));
+ assertEquals(1, app_0.getTotalRequiredResources(priority_2));
+
+ // Another off-switch, shouldn't allocate P1 due to delay scheduling
+ // thus, no P2 either!
+ a.assignContainers(clusterResource, node_2);
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_2),
+ eq(priority_1), any(ResourceRequest.class), any(Container.class));
+ assertEquals(2, app_0.getSchedulingOpportunities(priority_1));
+ assertEquals(2, app_0.getTotalRequiredResources(priority_1));
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_2),
+ eq(priority_2), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority_2));
+ assertEquals(1, app_0.getTotalRequiredResources(priority_2));
+
+ // Another off-switch; now we should allocate an OFF_SWITCH P1 container
+ a.assignContainers(clusterResource, node_2);
+ verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_2),
+ eq(priority_1), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority_1));
+ assertEquals(1, app_0.getTotalRequiredResources(priority_1));
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_2),
+ eq(priority_2), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority_2));
+ assertEquals(1, app_0.getTotalRequiredResources(priority_2));
+
+ // Now, NODE_LOCAL for P1
+ a.assignContainers(clusterResource, node_0);
+ verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0),
+ eq(priority_1), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority_1));
+ assertEquals(0, app_0.getTotalRequiredResources(priority_1));
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_0),
+ eq(priority_2), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority_2));
+ assertEquals(1, app_0.getTotalRequiredResources(priority_2));
+
+ // Now, OFF_SWITCH for P2
+ a.assignContainers(clusterResource, node_1);
+ verify(app_0, never()).allocate(any(NodeType.class), eq(node_1),
+ eq(priority_1), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority_1));
+ assertEquals(0, app_0.getTotalRequiredResources(priority_1));
+ verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_1),
+ eq(priority_2), any(ResourceRequest.class), any(Container.class));
+ assertEquals(0, app_0.getSchedulingOpportunities(priority_2));
+ assertEquals(0, app_0.getTotalRequiredResources(priority_2));
+
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
new file mode 100644
index 0000000..a5e0749
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
@@ -0,0 +1,378 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+public class TestParentQueue {
+
+ private static final Log LOG = LogFactory.getLog(TestParentQueue.class);
+
+ RMContext rmContext;
+ CapacitySchedulerConfiguration csConf;
+ CapacitySchedulerContext csContext;
+
+ final static int GB = 1024;
+ final static String DEFAULT_RACK = "/default";
+
+ @Before
+ public void setUp() throws Exception {
+ rmContext = TestUtils.getMockRMContext();
+ csConf = new CapacitySchedulerConfiguration();
+
+ csContext = mock(CapacitySchedulerContext.class);
+ when(csContext.getConfiguration()).thenReturn(csConf);
+ when(csContext.getMinimumResourceCapability()).thenReturn(
+ Resources.createResource(GB));
+ when(csContext.getMaximumResourceCapability()).thenReturn(
+ Resources.createResource(16*GB));
+ }
+
+ private static final String A = "a";
+ private static final String B = "b";
+ private void setupSingleLevelQueues(CapacitySchedulerConfiguration conf) {
+
+ // Define top-level queues
+ conf.setQueues(CapacityScheduler.ROOT, new String[] {A, B});
+ conf.setCapacity(CapacityScheduler.ROOT, 100);
+
+ final String Q_A = CapacityScheduler.ROOT + "." + A;
+ conf.setCapacity(Q_A, 30);
+
+ final String Q_B = CapacityScheduler.ROOT + "." + B;
+ conf.setCapacity(Q_B, 70);
+
+ LOG.info("Setup top-level queues a and b");
+ }
+
+ private void stubQueueAllocation(final Queue queue,
+ final Resource clusterResource, final SchedulerNode node,
+ final int allocation) {
+
+ // Simulate the queue allocation
+ doAnswer(new Answer<Resource>() {
+ @Override
+ public Resource answer(InvocationOnMock invocation) throws Throwable {
+ LOG.info("Stubbed assignContainers: queue=" + queue.getQueueName() +
+ " allocation=" + allocation + " node=" + node.getHostName());
+ final Resource allocatedResource = Resources.createResource(allocation);
+ if (queue instanceof ParentQueue) {
+ ((ParentQueue)queue).allocateResource(clusterResource,
+ allocatedResource);
+ } else {
+ ((LeafQueue)queue).allocateResource(clusterResource, "",
+ allocatedResource);
+ }
+
+ // Next call - nothing
+ if (allocation > 0) {
+ doReturn(Resources.none()).when(queue).assignContainers(
+ eq(clusterResource), eq(node));
+
+ // Mock the node's resource availability
+ Resource available = node.getAvailableResource();
+ doReturn(Resources.subtractFrom(available, allocatedResource)).
+ when(node).getAvailableResource();
+ }
+
+ return allocatedResource;
+ }
+ }).
+ when(queue).assignContainers(eq(clusterResource), eq(node));
+ }
+
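+ // Expected utilization = used memory / (cluster memory * the queue's
+ // absolute capacity), i.e. usage relative to the queue's guaranteed share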
+ private float computeQueueUtilization(Queue queue,
+ int expectedMemory, Resource clusterResource) {
+ return (expectedMemory /
+ (clusterResource.getMemory() * queue.getAbsoluteCapacity()));
+ }
+
+ @Test
+ public void testSingleLevelQueues() throws Exception {
+ // Setup queue configs
+ setupSingleLevelQueues(csConf);
+
+ Map<String, Queue> queues = new HashMap<String, Queue>();
+ Queue root =
+ CapacityScheduler.parseQueue(csContext, csConf, null,
+ CapacityScheduler.ROOT, queues, queues,
+ CapacityScheduler.queueComparator,
+ CapacityScheduler.applicationComparator,
+ TestUtils.spyHook);
+
+ // Setup some nodes
+ final int memoryPerNode = 10;
+ final int numNodes = 2;
+
+ SchedulerNode node_0 =
+ TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode*GB);
+ SchedulerNode node_1 =
+ TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB);
+
+ final Resource clusterResource =
+ Resources.createResource(numNodes * (memoryPerNode*GB));
+ when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+ // Start testing
+ LeafQueue a = (LeafQueue)queues.get(A);
+ LeafQueue b = (LeafQueue)queues.get(B);
+ final float delta = 0.0001f;
+
+ // Simulate B returning a container on node_0
+ stubQueueAllocation(a, clusterResource, node_0, 0*GB);
+ stubQueueAllocation(b, clusterResource, node_0, 1*GB);
+ root.assignContainers(clusterResource, node_0);
+ assertEquals(0.0f, a.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(b, 1*GB, clusterResource),
+ b.getUtilization(), delta);
+
+ // Now, A should get the scheduling opportunity since A=0G/6G, B=1G/14G
+ stubQueueAllocation(a, clusterResource, node_1, 2*GB);
+ stubQueueAllocation(b, clusterResource, node_1, 1*GB);
+ root.assignContainers(clusterResource, node_1);
+ InOrder allocationOrder = inOrder(a, b);
+ allocationOrder.verify(a).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ allocationOrder.verify(b).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ assertEquals(computeQueueUtilization(a, 2*GB, clusterResource),
+ a.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(b, 2*GB, clusterResource),
+ b.getUtilization(), delta);
+
+ // Now, B should get the scheduling opportunity
+ // since A has 2/6G while B has 2/14G
+ stubQueueAllocation(a, clusterResource, node_0, 1*GB);
+ stubQueueAllocation(b, clusterResource, node_0, 2*GB);
+ root.assignContainers(clusterResource, node_0);
+ allocationOrder = inOrder(b, a);
+ allocationOrder.verify(b).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ allocationOrder.verify(a).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ assertEquals(computeQueueUtilization(a, 3*GB, clusterResource),
+ a.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(b, 4*GB, clusterResource),
+ b.getUtilization(), delta);
+
+ // Now, B should still get the scheduling opportunity
+ // since A has 3/6G while B has 4/14G
+ stubQueueAllocation(a, clusterResource, node_0, 0*GB);
+ stubQueueAllocation(b, clusterResource, node_0, 4*GB);
+ root.assignContainers(clusterResource, node_0);
+ allocationOrder = inOrder(b, a);
+ allocationOrder.verify(b).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ allocationOrder.verify(a).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ assertEquals(computeQueueUtilization(a, 3*GB, clusterResource),
+ a.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(b, 8*GB, clusterResource),
+ b.getUtilization(), delta);
+
+ // Now, A should get the scheduling opportunity
+ // since A has 3/6G while B has 8/14G
+ stubQueueAllocation(a, clusterResource, node_1, 1*GB);
+ stubQueueAllocation(b, clusterResource, node_1, 1*GB);
+ root.assignContainers(clusterResource, node_1);
+ allocationOrder = inOrder(a, b);
+ allocationOrder.verify(a).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ allocationOrder.verify(b).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ assertEquals(computeQueueUtilization(a, 4*GB, clusterResource),
+ a.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(b, 9*GB, clusterResource),
+ b.getUtilization(), delta);
+ }
+
+ private static final String C = "c";
+ private static final String D = "d";
+ private static final String A1 = "a1";
+ private static final String A2 = "a2";
+ private static final String B1 = "b1";
+ private static final String B2 = "b2";
+ private static final String B3 = "b3";
+
+ private void setupMultiLevelQueues(CapacitySchedulerConfiguration conf) {
+
+ // Define top-level queues
+ conf.setQueues(CapacityScheduler.ROOT, new String[] {A, B, C, D});
+ conf.setCapacity(CapacityScheduler.ROOT, 100);
+
+ final String Q_A = CapacityScheduler.ROOT + "." + A;
+ conf.setCapacity(Q_A, 10);
+
+ final String Q_B = CapacityScheduler.ROOT + "." + B;
+ conf.setCapacity(Q_B, 50);
+
+ final String Q_C = CapacityScheduler.ROOT + "." + C;
+ conf.setCapacity(Q_C, 20);
+
+ final String Q_D = CapacityScheduler.ROOT + "." + D;
+ conf.setCapacity(Q_D, 20);
+
+ // Define 2-nd level queues
+ conf.setQueues(Q_A, new String[] {A1, A2});
+ conf.setCapacity(Q_A + "." + A1, 50);
+ conf.setCapacity(Q_A + "." + A2, 50);
+
+ conf.setQueues(Q_B, new String[] {B1, B2, B3});
+ conf.setCapacity(Q_B + "." + B1, 10);
+ conf.setCapacity(Q_B + "." + B2, 20);
+ conf.setCapacity(Q_B + "." + B3, 70);
+ }
+
+
+
+ @Test
+ public void testMultiLevelQueues() throws Exception {
+ // Setup queue configs
+ setupMultiLevelQueues(csConf);
+
+ Map<String, Queue> queues = new HashMap<String, Queue>();
+ Queue root =
+ CapacityScheduler.parseQueue(csContext, csConf, null,
+ CapacityScheduler.ROOT, queues, queues,
+ CapacityScheduler.queueComparator,
+ CapacityScheduler.applicationComparator,
+ TestUtils.spyHook);
+
+ // Setup some nodes
+ final int memoryPerNode = 10;
+ final int numNodes = 3;
+
+ SchedulerNode node_0 =
+ TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode*GB);
+ SchedulerNode node_1 =
+ TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB);
+ SchedulerNode node_2 =
+ TestUtils.getMockNode("host_2", DEFAULT_RACK, 0, memoryPerNode*GB);
+
+ final Resource clusterResource =
+ Resources.createResource(numNodes * (memoryPerNode*GB));
+ when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+ // Start testing
+ Queue a = queues.get(A);
+ Queue b = queues.get(B);
+ Queue c = queues.get(C);
+ Queue d = queues.get(D);
+
+ Queue a1 = queues.get(A1);
+ Queue a2 = queues.get(A2);
+
+ Queue b1 = queues.get(B1);
+ Queue b2 = queues.get(B2);
+ Queue b3 = queues.get(B3);
+
+ final float delta = 0.0001f;
+
+ // Simulate C returning a container on node_0
+ stubQueueAllocation(a, clusterResource, node_0, 0*GB);
+ stubQueueAllocation(b, clusterResource, node_0, 0*GB);
+ stubQueueAllocation(c, clusterResource, node_0, 1*GB);
+ stubQueueAllocation(d, clusterResource, node_0, 0*GB);
+ root.assignContainers(clusterResource, node_0);
+ assertEquals(computeQueueUtilization(a, 0*GB, clusterResource),
+ a.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(b, 0*GB, clusterResource),
+ b.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(c, 1*GB, clusterResource),
+ c.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(d, 0*GB, clusterResource),
+ d.getUtilization(), delta);
+ reset(a); reset(b); reset(c);
+
+ // Now get B2 to allocate
+ // A = 0/3, B = 0/15, C = 1/6, D=0/6
+ stubQueueAllocation(a, clusterResource, node_1, 0*GB);
+ stubQueueAllocation(b2, clusterResource, node_1, 4*GB);
+ stubQueueAllocation(c, clusterResource, node_1, 0*GB);
+ root.assignContainers(clusterResource, node_1);
+ assertEquals(computeQueueUtilization(a, 0*GB, clusterResource),
+ a.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(b, 4*GB, clusterResource),
+ b.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(c, 1*GB, clusterResource),
+ c.getUtilization(), delta);
+ reset(a); reset(b); reset(c);
+
+ // Now get A1, C and B3 to allocate, in the right order
+ // A = 0/3, B = 4/15, C = 1/6, D=0/6
+ stubQueueAllocation(a1, clusterResource, node_0, 1*GB);
+ stubQueueAllocation(b3, clusterResource, node_0, 2*GB);
+ stubQueueAllocation(c, clusterResource, node_0, 2*GB);
+ root.assignContainers(clusterResource, node_0);
+ InOrder allocationOrder = inOrder(a, c, b);
+ allocationOrder.verify(a).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ allocationOrder.verify(c).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ allocationOrder.verify(b).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ assertEquals(computeQueueUtilization(a, 1*GB, clusterResource),
+ a.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(b, 6*GB, clusterResource),
+ b.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(c, 3*GB, clusterResource),
+ c.getUtilization(), delta);
+ reset(a); reset(b); reset(c);
+
+ // Now verify max-capacity
+ // A = 1/3, B = 6/15, C = 3/6, D = 0/6
+ // Ensure a1 won't allocate above max-capacity, although it would
+ // otherwise get a scheduling opportunity now, right after a2
+ ((ParentQueue)a).setMaxCapacity(.1f); // a is now capped at 3/30
+ // a1 shouldn't be allocated anything due to a's max-capacity
+ stubQueueAllocation(a1, clusterResource, node_2, 1*GB);
+ stubQueueAllocation(a2, clusterResource, node_2, 2*GB);
+ stubQueueAllocation(b3, clusterResource, node_2, 1*GB);
+ stubQueueAllocation(b1, clusterResource, node_2, 1*GB);
+ stubQueueAllocation(c, clusterResource, node_2, 1*GB);
+ root.assignContainers(clusterResource, node_2);
+ allocationOrder = inOrder(a, a2, a1, b, c);
+ allocationOrder.verify(a).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ allocationOrder.verify(a2).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ allocationOrder.verify(b).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
+ allocationOrder.verify(c).assignContainers(eq(clusterResource),
+ any(SchedulerNode.class));
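+ // As an extra (hypothetical) sanity check: a1 must never have been
+ // offered this node at all, since its stub would have allocated 1GB and
+ // pushed 'a' past the 3GB asserted below
+ verify(a1, never()).assignContainers(eq(clusterResource), eq(node_2));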
+ assertEquals(computeQueueUtilization(a, 3*GB, clusterResource),
+ a.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(b, 8*GB, clusterResource),
+ b.getUtilization(), delta);
+ assertEquals(computeQueueUtilization(c, 4*GB, clusterResource),
+ c.getUtilization(), delta);
+ reset(a); reset(b); reset(c);
+
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
new file mode 100644
index 0000000..f353b78
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
@@ -0,0 +1,107 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.junit.Test;
+
+public class TestQueueParsing {
+
+ private static final Log LOG = LogFactory.getLog(TestQueueParsing.class);
+
+ @Test
+ public void testQueueParsing() throws Exception {
+ CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+ setupQueueConfiguration(conf);
+
+ CapacityScheduler capacityScheduler = new CapacityScheduler();
+ capacityScheduler.reinitialize(conf, null, null);
+ }
+
+ private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
+
+ // Define top-level queues
+ conf.setQueues(CapacityScheduler.ROOT, new String[] {"a", "b", "c"});
+ conf.setCapacity(CapacityScheduler.ROOT, 100);
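+ // Capacities are percentages of the parent queue: the root must be
+ // exactly 100 (see testRootQueueParsing below) and sibling queues at
+ // each level are configured here to sum to 100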
+
+ final String A = CapacityScheduler.ROOT + ".a";
+ conf.setCapacity(A, 10);
+
+ final String B = CapacityScheduler.ROOT + ".b";
+ conf.setCapacity(B, 20);
+
+ final String C = CapacityScheduler.ROOT + ".c";
+ conf.setCapacity(C, 70);
+
+ LOG.info("Setup top-level queues");
+
+ // Define 2nd-level queues
+ final String A1 = A + ".a1";
+ final String A2 = A + ".a2";
+ conf.setQueues(A, new String[] {"a1", "a2"});
+ conf.setCapacity(A1, 30);
+ conf.setCapacity(A2, 70);
+
+ final String B1 = B + ".b1";
+ final String B2 = B + ".b2";
+ final String B3 = B + ".b3";
+ conf.setQueues(B, new String[] {"b1", "b2", "b3"});
+ conf.setCapacity(B1, 50);
+ conf.setCapacity(B2, 30);
+ conf.setCapacity(B3, 20);
+
+ final String C1 = C + ".c1";
+ final String C2 = C + ".c2";
+ final String C3 = C + ".c3";
+ final String C4 = C + ".c4";
+ conf.setQueues(C, new String[] {"c1", "c2", "c3", "c4"});
+ conf.setCapacity(C1, 50);
+ conf.setCapacity(C2, 10);
+ conf.setCapacity(C3, 35);
+ conf.setCapacity(C4, 5);
+
+ LOG.info("Setup 2nd-level queues");
+
+ // Define 3rd-level queues
+ final String C11 = C1 + ".c11";
+ final String C12 = C1 + ".c12";
+ final String C13 = C1 + ".c13";
+ conf.setQueues(C1, new String[] {"c11", "c12", "c13"});
+ conf.setCapacity(C11, 15);
+ conf.setCapacity(C12, 45);
+ conf.setCapacity(C13, 40);
+
+ LOG.info("Setup 3rd-level queues");
+ }
+
+ @Test (expected=java.lang.IllegalArgumentException.class)
+ public void testRootQueueParsing() throws Exception {
+ CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+
+ // a root capacity other than 100 percent should throw IllegalArgumentException
+ conf.setCapacity(CapacityScheduler.ROOT, 90);
+
+ CapacityScheduler capacityScheduler = new CapacityScheduler();
+ capacityScheduler.reinitialize(conf, null, null);
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
new file mode 100644
index 0000000..e389501
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -0,0 +1,146 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import static org.mockito.Mockito.*;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+
+public class TestUtils {
+ private static final Log LOG = LogFactory.getLog(TestUtils.class);
+
+ /**
+ * Get a mock {@link RMContext} for use in test cases.
+ * @return a mock {@link RMContext} for use in test cases
+ */
+ @SuppressWarnings("rawtypes")
+ public static RMContext getMockRMContext() {
+ // Null dispatcher
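+ // (silently drops every event, so code under test can post events
+ // without a running RM event loop)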
+ Dispatcher nullDispatcher = new Dispatcher() {
+ private final EventHandler handler =
+ new EventHandler() {
+ @Override
+ public void handle(Event event) {
+ }
+ };
+ @Override
+ public void register(Class<? extends Enum> eventType,
+ EventHandler handler) {
+ }
+ @Override
+ public EventHandler getEventHandler() {
+ return handler;
+ }
+ };
+
+ // No op
+ ContainerAllocationExpirer cae =
+ new ContainerAllocationExpirer(nullDispatcher);
+
+ RMContext rmContext =
+ new RMContextImpl(null, nullDispatcher, cae, null);
+
+ return rmContext;
+ }
+
+ /**
+ * Hook to spy on queues: wrapping each queue in a Mockito spy lets tests
+ * stub assignContainers() and verify allocation order.
+ */
+ static class SpyHook extends CapacityScheduler.QueueHook {
+ @Override
+ public Queue hook(Queue queue) {
+ return spy(queue);
+ }
+ }
+ public static SpyHook spyHook = new SpyHook();
+
+ private static final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ public static Priority createMockPriority(int priority) {
+ Priority p = recordFactory.newRecordInstance(Priority.class);
+ p.setPriority(priority);
+ return p;
+ }
+
+ public static ResourceRequest createResourceRequest(
+ String hostName, int memory, int numContainers, Priority priority,
+ RecordFactory recordFactory) {
+ ResourceRequest request =
+ recordFactory.newRecordInstance(ResourceRequest.class);
+ Resource capability = Resources.createResource(memory);
+
+ request.setNumContainers(numContainers);
+ request.setHostName(hostName);
+ request.setCapability(capability);
+ request.setPriority(priority);
+ return request;
+ }
+
+ public static ApplicationAttemptId
+ getMockApplicationAttemptId(int appId, int attemptId) {
+ ApplicationId applicationId = mock(ApplicationId.class);
+ when(applicationId.getClusterTimestamp()).thenReturn(0L);
+ when(applicationId.getId()).thenReturn(appId);
+ ApplicationAttemptId applicationAttemptId = mock(ApplicationAttemptId.class);
+ when(applicationAttemptId.getApplicationId()).thenReturn(applicationId);
+ when(applicationAttemptId.getAttemptId()).thenReturn(attemptId);
+ return applicationAttemptId;
+ }
+
+ public static SchedulerNode getMockNode(
+ String host, String rack, int port, int capability) {
+ NodeId nodeId = mock(NodeId.class);
+ when(nodeId.getHost()).thenReturn(host);
+ when(nodeId.getPort()).thenReturn(port);
+ RMNode rmNode = mock(RMNode.class);
+ when(rmNode.getNodeID()).thenReturn(nodeId);
+ when(rmNode.getTotalCapability()).thenReturn(
+ Resources.createResource(capability));
+ when(rmNode.getNodeAddress()).thenReturn(host+":"+port);
+ when(rmNode.getHostName()).thenReturn(host);
+ when(rmNode.getRackName()).thenReturn(rack);
+
+ SchedulerNode node = spy(new SchedulerNode(rmNode));
+ LOG.info("node = " + host + " avail=" + node.getAvailableResource());
+ return node;
+ }
+
+ public static ContainerId getMockContainerId(SchedulerApp application) {
+ ContainerId containerId = mock(ContainerId.class);
+ doReturn(application.getApplicationAttemptId()).when(containerId).getAppAttemptId();
+ doReturn(application.getApplicationId()).when(containerId).getAppId();
+ doReturn(application.getNewContainerId()).when(containerId).getId();
+ return containerId;
+ }
+
+ public static Container getMockContainer(
+ ContainerId containerId, NodeId nodeId, Resource resource) {
+ Container container = mock(Container.class);
+ when(container.getId()).thenReturn(containerId);
+ when(container.getNodeId()).thenReturn(nodeId);
+ when(container.getResource()).thenReturn(resource);
+ return container;
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
new file mode 100644
index 0000000..81d2d3d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -0,0 +1,290 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo;
+
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.Application;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.Task;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.junit.After;
+import org.junit.Before;
+
+public class TestFifoScheduler {
+ private static final Log LOG = LogFactory.getLog(TestFifoScheduler.class);
+
+ private ResourceManager resourceManager = null;
+
+ @Before
+ public void setUp() throws Exception {
+ Store store = StoreFactory.getStore(new Configuration());
+ resourceManager = new ResourceManager(store);
+ resourceManager.init(new Configuration());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
+ registerNode(String hostName, int containerManagerPort, int nmHttpPort,
+ String rackName, int memory) throws IOException {
+ return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
+ hostName, containerManagerPort, nmHttpPort, rackName, memory,
+ resourceManager.getResourceTrackerService(), resourceManager
+ .getRMContext());
+ }
+
+
+
+// @Test
+ public void testFifoScheduler() throws Exception {
+
+ LOG.info("--- START: testFifoScheduler ---");
+
+ final int GB = 1024;
+
+ // Register node1
+ String host_0 = "host_0";
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 =
+ registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 4 * GB);
+ nm_0.heartbeat();
+
+ // Register node2
+ String host_1 = "host_1";
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 =
+ registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 2 * GB);
+ nm_1.heartbeat();
+
+ // ResourceRequest priorities
+ Priority priority_0 =
+ org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(0);
+ Priority priority_1 =
+ org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(1);
+
+ // Submit an application
+ Application application_0 = new Application("user_0", resourceManager);
+ application_0.submit();
+
+ application_0.addNodeManager(host_0, 1234, nm_0);
+ application_0.addNodeManager(host_1, 1234, nm_1);
+
+ Resource capability_0_0 = Resources.createResource(GB);
+ application_0.addResourceRequestSpec(priority_1, capability_0_0);
+
+ Resource capability_0_1 = Resources.createResource(2 * GB);
+ application_0.addResourceRequestSpec(priority_0, capability_0_1);
+
+ Task task_0_0 = new Task(application_0, priority_1,
+ new String[] {host_0, host_1});
+ application_0.addTask(task_0_0);
+
+ // Submit another application
+ Application application_1 = new Application("user_1", resourceManager);
+ application_1.submit();
+
+ application_1.addNodeManager(host_0, 1234, nm_0);
+ application_1.addNodeManager(host_1, 1234, nm_1);
+
+ Resource capability_1_0 = Resources.createResource(3 * GB);
+ application_1.addResourceRequestSpec(priority_1, capability_1_0);
+
+ Resource capability_1_1 = Resources.createResource(4 * GB);
+ application_1.addResourceRequestSpec(priority_0, capability_1_1);
+
+ Task task_1_0 = new Task(application_1, priority_1,
+ new String[] {host_0, host_1});
+ application_1.addTask(task_1_0);
+
+ // Send resource requests to the scheduler
+ LOG.info("Send resource requests to the scheduler");
+ application_0.schedule();
+ application_1.schedule();
+
+ // Send a heartbeat to kick the tires on the Scheduler
+ LOG.info("Send a heartbeat to kick the tires on the Scheduler... " +
+ "nm0 -> task_0_0 and task_1_0 allocated, used=4G " +
+ "nm1 -> nothing allocated");
+ nm_0.heartbeat(); // task_0_0 and task_1_0 allocated, used=4G
+ nm_1.heartbeat(); // nothing allocated
+
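+ // FIFO order: application_0 was submitted first, so its 1GB task is
+ // placed before application_1's 3GB task; together they fill nm_0's 4GB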
+ // Get allocations from the scheduler
+ application_0.schedule(); // task_0_0
+ checkApplicationResourceUsage(GB, application_0);
+
+ application_1.schedule(); // task_1_0
+ checkApplicationResourceUsage(3 * GB, application_1);
+
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+
+ checkNodeResourceUsage(4*GB, nm_0); // task_0_0 (1G) and task_1_0 (3G)
+ checkNodeResourceUsage(0*GB, nm_1); // no tasks, 2G available
+
+ LOG.info("Adding new tasks...");
+
+ Task task_1_1 = new Task(application_1, priority_1,
+ new String[] {RMNode.ANY});
+ application_1.addTask(task_1_1);
+
+ Task task_1_2 = new Task(application_1, priority_1,
+ new String[] {RMNode.ANY});
+ application_1.addTask(task_1_2);
+
+ Task task_1_3 = new Task(application_1, priority_0,
+ new String[] {RMNode.ANY});
+ application_1.addTask(task_1_3);
+
+ application_1.schedule();
+
+ Task task_0_1 = new Task(application_0, priority_1,
+ new String[] {host_0, host_1});
+ application_0.addTask(task_0_1);
+
+ Task task_0_2 = new Task(application_0, priority_1,
+ new String[] {host_0, host_1});
+ application_0.addTask(task_0_2);
+
+ Task task_0_3 = new Task(application_0, priority_0,
+ new String[] {RMNode.ANY});
+ application_0.addTask(task_0_3);
+
+ application_0.schedule();
+
+ // Send a heartbeat to kick the tires on the Scheduler
+ LOG.info("Sending hb from " + nm_0.getHostName());
+ nm_0.heartbeat(); // nothing new, used=4G
+
+ LOG.info("Sending hb from " + nm_1.getHostName());
+ nm_1.heartbeat(); // task_0_3, used=2G
+
+ // Get allocations from the scheduler
+ LOG.info("Trying to allocate...");
+ application_0.schedule();
+ checkApplicationResourceUsage(3 * GB, application_0);
+ application_1.schedule();
+ checkApplicationResourceUsage(3 * GB, application_1);
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+ checkNodeResourceUsage(4*GB, nm_0);
+ checkNodeResourceUsage(2*GB, nm_1);
+
+ // Complete tasks
+ LOG.info("Finishing up task_0_0");
+ application_0.finishTask(task_0_0); // Now task_0_1
+ application_0.schedule();
+ application_1.schedule();
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+ checkApplicationResourceUsage(3 * GB, application_0);
+ checkApplicationResourceUsage(3 * GB, application_1);
+ checkNodeResourceUsage(4*GB, nm_0);
+ checkNodeResourceUsage(2*GB, nm_1);
+
+ LOG.info("Finishing up task_1_0");
+ application_1.finishTask(task_1_0); // Now task_0_2
+ application_0.schedule(); // final overcommit for app0 caused here
+ application_1.schedule();
+ nm_0.heartbeat(); // final overcommit for app0 occurs here
+ nm_1.heartbeat();
+ checkApplicationResourceUsage(4 * GB, application_0);
+ checkApplicationResourceUsage(0 * GB, application_1);
+ //checkNodeResourceUsage(1*GB, nm_0); // final over-commit -> rm.node->1G, test.node=2G
+ checkNodeResourceUsage(2*GB, nm_1);
+
+ LOG.info("Finishing up task_0_3");
+ application_0.finishTask(task_0_3); // No more
+ application_0.schedule();
+ application_1.schedule();
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+ checkApplicationResourceUsage(2 * GB, application_0);
+ checkApplicationResourceUsage(0 * GB, application_1);
+ //checkNodeResourceUsage(2*GB, nm_0); // final over-commit, rm.node->1G, test.node->2G
+ checkNodeResourceUsage(0*GB, nm_1);
+
+ LOG.info("Finishing up task_0_1");
+ application_0.finishTask(task_0_1);
+ application_0.schedule();
+ application_1.schedule();
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+ checkApplicationResourceUsage(1 * GB, application_0);
+ checkApplicationResourceUsage(0 * GB, application_1);
+
+ LOG.info("Finishing up task_0_2");
+ application_0.finishTask(task_0_2); // now task_1_3 can go!
+ application_0.schedule();
+ application_1.schedule();
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+ checkApplicationResourceUsage(0 * GB, application_0);
+ checkApplicationResourceUsage(4 * GB, application_1);
+
+ LOG.info("Finishing up task_1_3");
+ application_1.finishTask(task_1_3); // now task_1_1
+ application_0.schedule();
+ application_1.schedule();
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+ checkApplicationResourceUsage(0 * GB, application_0);
+ checkApplicationResourceUsage(3 * GB, application_1);
+
+ LOG.info("Finishing up task_1_1");
+ application_1.finishTask(task_1_1);
+ application_0.schedule();
+ application_1.schedule();
+ nm_0.heartbeat();
+ nm_1.heartbeat();
+ checkApplicationResourceUsage(0 * GB, application_0);
+ checkApplicationResourceUsage(3 * GB, application_1);
+
+ LOG.info("--- END: testFifoScheduler ---");
+ }
+
+ private void checkApplicationResourceUsage(int expected,
+ Application application) {
+ Assert.assertEquals(expected, application.getUsedResources().getMemory());
+ }
+
+ private void checkNodeResourceUsage(int expected,
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) {
+ Assert.assertEquals(expected, node.getUsed().getMemory());
+ node.checkResourceUsage();
+ }
+
+ public static void main(String[] arg) throws Exception {
+ TestFifoScheduler t = new TestFifoScheduler();
+ t.setUp();
+ t.testFifoScheduler();
+ t.tearDown();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
new file mode 100644
index 0000000..8ea9e80
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
@@ -0,0 +1,176 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.MockNodes.newResource;
+import static org.apache.hadoop.yarn.webapp.Params.TITLE;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.List;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.MockAsm;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+import com.google.inject.Injector;
+
+public class TestRMWebApp {
+ static final int GiB = 1024; // MiB
+
+ @Test public void testControllerIndex() {
+ Injector injector = WebAppTests.createMockInjector(this);
+ RmController c = injector.getInstance(RmController.class);
+ c.index();
+ assertEquals("Applications", c.get(TITLE, "unknown"));
+ }
+
+ @Test public void testView() {
+ Injector injector = WebAppTests.createMockInjector(RMContext.class,
+ mockRMContext(3, 1, 2, 8*GiB));
+ injector.getInstance(RmView.class).render();
+ WebAppTests.flushOutput(injector);
+ }
+
+ @Test public void testNodesPage() {
+ WebAppTests.testPage(NodesPage.class, RMContext.class,
+ mockRMContext(3, 1, 2, 8*GiB));
+ }
+
+ public static RMContext mockRMContext(int numApps, int racks, int numNodes,
+ int mbsPerNode) {
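+ // Build an RMContext backed purely by mock apps and nodes; overriding
+ // the two getters below is all the web views under test need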
+ final List<RMApp> apps = MockAsm.newApplications(numApps);
+ final ConcurrentMap<ApplicationId, RMApp> applicationsMaps = Maps
+ .newConcurrentMap();
+ for (RMApp app : apps) {
+ applicationsMaps.put(app.getApplicationId(), app);
+ }
+ final List<RMNode> nodes = MockNodes.newNodes(racks, numNodes,
+ newResource(mbsPerNode));
+ final ConcurrentMap<NodeId, RMNode> nodesMap = Maps.newConcurrentMap();
+ for (RMNode node : nodes) {
+ nodesMap.put(node.getNodeID(), node);
+ }
+ return new RMContextImpl(new MemStore(), null, null, null) {
+ @Override
+ public ConcurrentMap<ApplicationId, RMApp> getRMApps() {
+ return applicationsMaps;
+ }
+ @Override
+ public ConcurrentMap<NodeId, RMNode> getRMNodes() {
+ return nodesMap;
+ }
+ };
+ }
+
+ public static ResourceManager mockRm(int apps, int racks, int nodes,
+ int mbsPerNode)
+ throws Exception {
+ ResourceManager rm = mock(ResourceManager.class);
+ RMContext rmContext = mockRMContext(apps, racks, nodes,
+ mbsPerNode);
+ ResourceScheduler rs = mockCapacityScheduler();
+ when(rm.getResourceScheduler()).thenReturn(rs);
+ when(rm.getRMContext()).thenReturn(rmContext);
+ return rm;
+ }
+
+ public static CapacityScheduler mockCapacityScheduler() throws Exception {
+ // stolen from TestCapacityScheduler
+ CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+ setupQueueConfiguration(conf);
+
+ CapacityScheduler cs = new CapacityScheduler();
+ cs.reinitialize(conf, null, null);
+ return cs;
+ }
+
+ static void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
+ // Define top-level queues
+ conf.setQueues(CapacityScheduler.ROOT, new String[] {"a", "b", "c"});
+ conf.setCapacity(CapacityScheduler.ROOT, 100);
+
+ final String A = CapacityScheduler.ROOT + ".a";
+ conf.setCapacity(A, 10);
+
+ final String B = CapacityScheduler.ROOT + ".b";
+ conf.setCapacity(B, 20);
+
+ final String C = CapacityScheduler.ROOT + ".c";
+ conf.setCapacity(C, 70);
+
+ // Define 2nd-level queues
+ final String A1 = A + ".a1";
+ final String A2 = A + ".a2";
+ conf.setQueues(A, new String[] {"a1", "a2"});
+ conf.setCapacity(A1, 30);
+ conf.setCapacity(A2, 70);
+
+ final String B1 = B + ".b1";
+ final String B2 = B + ".b2";
+ final String B3 = B + ".b3";
+ conf.setQueues(B, new String[] {"b1", "b2", "b3"});
+ conf.setCapacity(B1, 50);
+ conf.setCapacity(B2, 30);
+ conf.setCapacity(B3, 20);
+
+ final String C1 = C + ".c1";
+ final String C2 = C + ".c2";
+ final String C3 = C + ".c3";
+ final String C4 = C + ".c4";
+ conf.setQueues(C, new String[] {"c1", "c2", "c3", "c4"});
+ conf.setCapacity(C1, 50);
+ conf.setCapacity(C2, 10);
+ conf.setCapacity(C3, 35);
+ conf.setCapacity(C4, 5);
+
+ // Define 3rd-level queues
+ final String C11 = C1 + ".c11";
+ final String C12 = C1 + ".c12";
+ final String C13 = C1 + ".c13";
+ conf.setQueues(C1, new String[] {"c11", "c12", "c13"});
+ conf.setCapacity(C11, 15);
+ conf.setCapacity(C12, 45);
+ conf.setCapacity(C13, 40);
+ }
+
+ public static void main(String[] args) throws Exception {
+ // For manual testing
+ WebApps.$for("yarn", new TestRMWebApp()).at(8888).inDevMode().
+ start(new RMWebApp(mockRm(101, 8, 8, 8*GiB))).joinThread();
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
new file mode 100644
index 0000000..fec7984
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -0,0 +1,48 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>hadoop-yarn-server</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${yarn.version}</version>
+ </parent>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-tests</artifactId>
+ <name>hadoop-yarn-server-tests</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-nodemanager</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
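+ <!-- Publish the test classes as a test-jar so downstream modules can
+ reuse helpers such as MiniYARNCluster. -->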
+ <plugin>
+ <artifactId>maven-jar-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ <phase>test-compile</phase>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
new file mode 100644
index 0000000..ef4f73a
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -0,0 +1,261 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.NodeHealthCheckerService;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.NMConfig;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
+import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
+import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.service.CompositeService;
+
+public class MiniYARNCluster extends CompositeService {
+
+ private static final Log LOG = LogFactory.getLog(MiniYARNCluster.class);
+
+ // temp fix until metrics system can auto-detect itself running in unit test:
+ static {
+ DefaultMetricsSystem.setMiniClusterMode(true);
+ }
+
+ private NodeManager nodeManager;
+ private ResourceManager resourceManager;
+
+ private ResourceManagerWrapper resourceManagerWrapper;
+ private NodeManagerWrapper nodeManagerWrapper;
+
+ private File testWorkDir;
+
+ public MiniYARNCluster(String testName) {
+ super(testName);
+ this.testWorkDir = new File("target", testName);
+ try {
+ FileContext.getLocalFSFileContext().delete(
+ new Path(testWorkDir.getAbsolutePath()), true);
+ } catch (Exception e) {
+ LOG.warn("COULD NOT CLEANUP", e);
+ throw new YarnException("could not cleanup test dir", e);
+ }
+ resourceManagerWrapper = new ResourceManagerWrapper();
+ addService(resourceManagerWrapper);
+ nodeManagerWrapper = new NodeManagerWrapper();
+ addService(nodeManagerWrapper);
+ }
+
+ public File getTestWorkDir() {
+ return testWorkDir;
+ }
+
+ public ResourceManager getResourceManager() {
+ return this.resourceManager;
+ }
+
+ public NodeManager getNodeManager() {
+ return this.nodeManager;
+ }
+
+ private class ResourceManagerWrapper extends AbstractService {
+ public ResourceManagerWrapper() {
+ super(ResourceManagerWrapper.class.getName());
+ }
+
+ @Override
+ public synchronized void start() {
+ try {
+ Store store = StoreFactory.getStore(getConfig());
+ resourceManager = new ResourceManager(store) {
+ @Override
+ protected void doSecureLogin() throws IOException {
+ // Don't try to log in using a keytab in the testcase.
+ };
+ };
+ resourceManager.init(getConfig());
+ new Thread() {
+ public void run() {
+ resourceManager.start();
+ };
+ }.start();
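+ // The RM starts on its own thread; poll the service state for up to
+ // ~90 seconds (60 x 1.5s) before declaring failure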
+ int waitCount = 0;
+ while (resourceManager.getServiceState() == STATE.INITED
+ && waitCount++ < 60) {
+ LOG.info("Waiting for RM to start...");
+ Thread.sleep(1500);
+ }
+ if (resourceManager.getServiceState() != STATE.STARTED) {
+ // RM could have failed.
+ throw new IOException(
+ "ResourceManager failed to start. Final state is "
+ + resourceManager.getServiceState());
+ }
+ super.start();
+ } catch (Throwable t) {
+ throw new YarnException(t);
+ }
+ }
+
+ @Override
+ public synchronized void stop() {
+ if (resourceManager != null) {
+ resourceManager.stop();
+ }
+ super.stop();
+ }
+ }
+
+ private class NodeManagerWrapper extends AbstractService {
+ public NodeManagerWrapper() {
+ super(NodeManagerWrapper.class.getName());
+ }
+
+ public synchronized void start() {
+ try {
+ File localDir =
+ new File(testWorkDir, MiniYARNCluster.this.getName() + "-localDir");
+ localDir.mkdir();
+ LOG.info("Created localDir in " + localDir.getAbsolutePath());
+ getConfig().set(NMConfig.NM_LOCAL_DIR, localDir.getAbsolutePath());
+ File logDir =
+ new File(testWorkDir, MiniYARNCluster.this.getName()
+ + "-logDir");
+ File remoteLogDir =
+ new File(testWorkDir, MiniYARNCluster.this.getName()
+ + "-remoteLogDir");
+ logDir.mkdir();
+ remoteLogDir.mkdir();
+ LOG.info("Created logDir in " + logDir.getAbsolutePath());
+ getConfig().set(NMConfig.NM_LOG_DIR, logDir.getAbsolutePath());
+ getConfig().set(NMConfig.REMOTE_USER_LOG_DIR,
+ remoteLogDir.getAbsolutePath());
+ getConfig().setInt(NMConfig.NM_VMEM_GB, 4); // By default AM + 2 containers
+ nodeManager = new NodeManager() {
+
+ @Override
+ protected void doSecureLogin() throws IOException {
+ // Don't try to log in using a keytab in the testcase.
+ };
+
+ @Override
+ protected NodeStatusUpdater createNodeStatusUpdater(Context context,
+ Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
+ return new NodeStatusUpdaterImpl(context, dispatcher,
+ healthChecker, metrics) {
+ @Override
+ protected ResourceTracker getRMClient() {
+ final ResourceTrackerService rt = resourceManager
+ .getResourceTrackerService();
+ final RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ // For in-process communication without RPC
+ return new ResourceTracker() {
+
+ @Override
+ public NodeHeartbeatResponse nodeHeartbeat(
+ NodeHeartbeatRequest request) throws YarnRemoteException {
+ NodeHeartbeatResponse response = recordFactory.newRecordInstance(
+ NodeHeartbeatResponse.class);
+ try {
+ response.setHeartbeatResponse(rt.nodeHeartbeat(request)
+ .getHeartbeatResponse());
+ } catch (IOException ioe) {
+ LOG.info("Exception in heartbeat from node " +
+ request.getNodeStatus().getNodeId(), ioe);
+ throw RPCUtil.getRemoteException(ioe);
+ }
+ return response;
+ }
+
+ @Override
+ public RegisterNodeManagerResponse registerNodeManager(
+ RegisterNodeManagerRequest request)
+ throws YarnRemoteException {
+ RegisterNodeManagerResponse response = recordFactory.newRecordInstance(
+ RegisterNodeManagerResponse.class);
+ try {
+ response.setRegistrationResponse(rt
+ .registerNodeManager(request)
+ .getRegistrationResponse());
+ } catch (IOException ioe) {
+ LOG.info("Exception in node registration from "
+ + request.getNodeId().toString(), ioe);
+ throw RPCUtil.getRemoteException(ioe);
+ }
+ return response;
+ }
+ };
+ };
+ };
+ };
+ };
+ nodeManager.init(getConfig());
+ new Thread() {
+ public void run() {
+ nodeManager.start();
+ };
+ }.start();
+ int waitCount = 0;
+ while (nodeManager.getServiceState() == STATE.INITED
+ && waitCount++ < 60) {
+ LOG.info("Waiting for NM to start...");
+ Thread.sleep(1000);
+ }
+ if (nodeManager.getServiceState() != STATE.STARTED) {
+ // NM could have failed.
+ throw new IOException("NodeManager failed to start");
+ }
+ super.start();
+ } catch (Throwable t) {
+ throw new YarnException(t);
+ }
+ }
+
+ @Override
+ public synchronized void stop() {
+ if (nodeManager != null) {
+ nodeManager.stop();
+ }
+ super.stop();
+ }
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java
new file mode 100644
index 0000000..89e2b09
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java
@@ -0,0 +1,358 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server;
+
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.avro.AvroRuntimeException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
+import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
+import org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.security.SchedulerSecurityInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestContainerTokenSecretManager {
+
+ private static Log LOG = LogFactory
+ .getLog(TestContainerTokenSecretManager.class);
+ private static final RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+ private static FileContext localFS = null;
+ private static final File localDir = new File("target",
+ TestContainerTokenSecretManager.class.getName() + "-localDir")
+ .getAbsoluteFile();
+
+ @BeforeClass
+ public static void setup() throws AccessControlException,
+ FileNotFoundException, UnsupportedFileSystemException, IOException {
+ localFS = FileContext.getLocalFSFileContext();
+ localFS.delete(new Path(localDir.getAbsolutePath()), true);
+ localDir.mkdir();
+ }
+
+ @Test
+ public void test() throws IOException, InterruptedException {
+
+ final ApplicationId appID = recordFactory.newRecordInstance(ApplicationId.class);
+ appID.setClusterTimestamp(1234);
+ appID.setId(5);
+
+ final Configuration conf = new Configuration();
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+ // Set AM expiry interval to be very long.
+ conf.setLong(YarnConfiguration.AM_EXPIRY_INTERVAL, 100000L);
+ UserGroupInformation.setConfiguration(conf);
+ MiniYARNCluster yarnCluster =
+ new MiniYARNCluster(TestContainerTokenSecretManager.class.getName());
+ yarnCluster.init(conf);
+ yarnCluster.start();
+
+ ResourceManager resourceManager = yarnCluster.getResourceManager();
+
+ final YarnRPC yarnRPC = YarnRPC.create(conf);
+
+ // Submit an application
+ ApplicationSubmissionContext appSubmissionContext =
+ recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
+ appSubmissionContext.setApplicationId(appID);
+ appSubmissionContext.setMasterCapability(recordFactory
+ .newRecordInstance(Resource.class));
+ appSubmissionContext.getMasterCapability().setMemory(1024);
+// appSubmissionContext.resources = new HashMap<String, URL>();
+ appSubmissionContext.setUser("testUser");
+// appSubmissionContext.environment = new HashMap<String, String>();
+// appSubmissionContext.command = new ArrayList<String>();
+ appSubmissionContext.addCommand("sleep");
+ appSubmissionContext.addCommand("100");
+
+ // TODO: Use a resource to work around bugs. Today the NM doesn't create
+ // local app-dirs if there are no files to download!
+ File file = new File(localDir.getAbsolutePath(), "testFile");
+ FileWriter tmpFile = new FileWriter(file);
+ tmpFile.write("testing");
+ tmpFile.close();
+ URL testFileURL =
+ ConverterUtils.getYarnUrlFromPath(FileContext.getFileContext()
+ .makeQualified(new Path(localDir.getAbsolutePath(), "testFile")));
+ LocalResource rsrc = recordFactory.newRecordInstance(LocalResource.class);
+ rsrc.setResource(testFileURL);
+ rsrc.setSize(file.length());
+ rsrc.setTimestamp(file.lastModified());
+ rsrc.setType(LocalResourceType.FILE);
+ rsrc.setVisibility(LocalResourceVisibility.PRIVATE);
+ appSubmissionContext.setResourceTodo("testFile", rsrc);
+ SubmitApplicationRequest submitRequest = recordFactory
+ .newRecordInstance(SubmitApplicationRequest.class);
+ submitRequest.setApplicationSubmissionContext(appSubmissionContext);
+ resourceManager.getClientRMService().submitApplication(submitRequest);
+
+ // Wait till container gets allocated for AM
+ int waitCounter = 0;
+ RMApp app = resourceManager.getRMContext().getRMApps().get(appID);
+ RMAppAttempt appAttempt = app == null ? null : app.getCurrentAppAttempt();
+ RMAppAttemptState state = appAttempt == null ? null : appAttempt
+ .getAppAttemptState();
+ while ((app == null || appAttempt == null || state == null
+ || !state.equals(RMAppAttemptState.LAUNCHED)) && waitCounter++ != 20) {
+ LOG.info("Waiting for applicationAttempt to be created.. ");
+ Thread.sleep(1000);
+ app = resourceManager.getRMContext().getRMApps().get(appID);
+ appAttempt = app == null ? null : app.getCurrentAppAttempt();
+ state = appAttempt == null ? null : appAttempt.getAppAttemptState();
+ }
+ Assert.assertNotNull(app);
+ Assert.assertNotNull(appAttempt);
+ Assert.assertNotNull(state);
+ Assert.assertEquals(RMAppAttemptState.LAUNCHED, state);
+
+ UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+
+ // Ask for a container from the RM
+ String schedulerAddressString =
+ conf.get(YarnConfiguration.SCHEDULER_ADDRESS,
+ YarnConfiguration.DEFAULT_SCHEDULER_BIND_ADDRESS);
+ final InetSocketAddress schedulerAddr =
+ NetUtils.createSocketAddr(schedulerAddressString);
+ ApplicationTokenIdentifier appTokenIdentifier =
+ new ApplicationTokenIdentifier(appID);
+ ApplicationTokenSecretManager appTokenSecretManager =
+ new ApplicationTokenSecretManager();
+ appTokenSecretManager.setMasterKey(ApplicationTokenSecretManager
+ .createSecretKey("Dummy".getBytes())); // TODO: FIX. Be in Sync with
+ // ResourceManager.java
+ Token<ApplicationTokenIdentifier> appToken =
+ new Token<ApplicationTokenIdentifier>(appTokenIdentifier,
+ appTokenSecretManager);
+ appToken.setService(new Text(schedulerAddressString));
+ currentUser.addToken(appToken);
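+ // The token is signed with the same hard-coded "Dummy" master key the
+ // RM is expected to use (see the TODO above), so the scheduler RPC
+ // below can authenticate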
+
+ conf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ SchedulerSecurityInfo.class, SecurityInfo.class);
+ AMRMProtocol scheduler =
+ currentUser.doAs(new PrivilegedAction<AMRMProtocol>() {
+ @Override
+ public AMRMProtocol run() {
+ return (AMRMProtocol) yarnRPC.getProxy(AMRMProtocol.class,
+ schedulerAddr, conf);
+ }
+ });
+
+ // Register the appMaster
+ RegisterApplicationMasterRequest request =
+ recordFactory
+ .newRecordInstance(RegisterApplicationMasterRequest.class);
+ ApplicationMaster applicationMaster = recordFactory
+ .newRecordInstance(ApplicationMaster.class);
+ request.setApplicationAttemptId(resourceManager.getRMContext()
+ .getRMApps().get(appID).getCurrentAppAttempt().getAppAttemptId());
+ scheduler.registerApplicationMaster(request);
+
+ // Now request a container allocation.
+ List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
+ ResourceRequest rr = recordFactory.newRecordInstance(ResourceRequest.class);
+ rr.setCapability(recordFactory.newRecordInstance(Resource.class));
+ rr.getCapability().setMemory(1024);
+ rr.setHostName("*");
+ rr.setNumContainers(1);
+ rr.setPriority(recordFactory.newRecordInstance(Priority.class));
+ rr.getPriority().setPriority(0);
+ ask.add(rr);
+ ArrayList<ContainerId> release = new ArrayList<ContainerId>();
+
+ AllocateRequest allocateRequest =
+ recordFactory.newRecordInstance(AllocateRequest.class);
+ allocateRequest.setApplicationAttemptId(appAttempt.getAppAttemptId());
+ allocateRequest.setResponseId(0);
+ allocateRequest.addAllAsks(ask);
+ allocateRequest.addAllReleases(release);
+ List<Container> allocatedContainers = scheduler.allocate(allocateRequest)
+ .getAMResponse().getNewContainerList();
+
+ waitCounter = 0;
+ while ((allocatedContainers == null || allocatedContainers.size() == 0)
+ && waitCounter++ != 20) {
+ LOG.info("Waiting for container to be allocated..");
+ Thread.sleep(1000);
+ allocateRequest.setResponseId(allocateRequest.getResponseId() + 1);
+ allocatedContainers =
+ scheduler.allocate(allocateRequest).getAMResponse()
+ .getNewContainerList();
+ }
+
+ Assert.assertNotNull("Container is not allocted!", allocatedContainers);
+ Assert.assertEquals("Didn't get one container!", 1,
+ allocatedContainers.size());
+
+ // Now talk to the NM for launching the container.
+ final Container allocatedContainer = allocatedContainers.get(0);
+ ContainerToken containerToken = allocatedContainer.getContainerToken();
+ Token<ContainerTokenIdentifier> token =
+ new Token<ContainerTokenIdentifier>(
+ containerToken.getIdentifier().array(),
+ containerToken.getPassword().array(), new Text(
+ containerToken.getKind()), new Text(
+ containerToken.getService()));
+ currentUser.addToken(token);
+ conf.setClass(
+ YarnConfiguration.YARN_SECURITY_INFO,
+ ContainerManagerSecurityInfo.class, SecurityInfo.class);
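+ // Exercise the legitimate token first: the RPC connection itself
+ // should authenticate, so any exception caught below is a
+ // container-level error rather than an authentication failure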
+ currentUser.doAs(new PrivilegedAction<Void>() {
+ @Override
+ public Void run() {
+ ContainerManager client = (ContainerManager) yarnRPC.getProxy(
+ ContainerManager.class, NetUtils
+ .createSocketAddr(allocatedContainer.getNodeId().toString()),
+ conf);
+ try {
+ LOG.info("Going to make a getContainerStatus() legal request");
+ GetContainerStatusRequest request =
+ recordFactory
+ .newRecordInstance(GetContainerStatusRequest.class);
+ ContainerId containerID =
+ recordFactory.newRecordInstance(ContainerId.class);
+ containerID.setAppId(appID);
+ containerID.setId(1);
+ request.setContainerId(containerID);
+ client.getContainerStatus(request);
+ } catch (YarnRemoteException e) {
+ LOG.info("Error", e);
+ } catch (AvroRuntimeException e) {
+ LOG.info("Got the expected exception");
+ }
+ return null;
+ }
+ });
+
+ UserGroupInformation maliceUser =
+ UserGroupInformation.createRemoteUser(currentUser.getShortUserName());
+ byte[] identifierBytes = containerToken.getIdentifier().array();
+ DataInputBuffer di = new DataInputBuffer();
+ di.reset(identifierBytes, identifierBytes.length);
+ ContainerTokenIdentifier dummyIdentifier = new ContainerTokenIdentifier();
+ dummyIdentifier.readFields(di);
+ Resource modifiedResource = recordFactory.newRecordInstance(Resource.class);
+ modifiedResource.setMemory(2048);
+ ContainerTokenIdentifier modifiedIdentifier =
+ new ContainerTokenIdentifier(dummyIdentifier.getContainerID(),
+ dummyIdentifier.getNmHostName(), modifiedResource);
+ // Malicious user modifies the resource amount
+ Token<ContainerTokenIdentifier> modifiedToken =
+ new Token<ContainerTokenIdentifier>(modifiedIdentifier.getBytes(),
+ containerToken.getPassword().array(), new Text(
+ containerToken.getKind()), new Text(
+ containerToken.getService()));
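+ // The token password was derived by the RM from the original identifier
+ // bytes, so the NM's DIGEST-MD5 handshake should detect the tampered
+ // identifier and refuse the connection, as asserted below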
+ maliceUser.addToken(modifiedToken);
+ maliceUser.doAs(new PrivilegedAction<Void>() {
+ @Override
+ public Void run() {
+ ContainerManager client = (ContainerManager) yarnRPC.getProxy(
+ ContainerManager.class, NetUtils
+ .createSocketAddr(allocatedContainer.getNodeId().toString()),
+ conf);
+ ContainerId containerID;
+
+ LOG.info("Going to contact NM: ilLegal request");
+ GetContainerStatusRequest request =
+ recordFactory
+ .newRecordInstance(GetContainerStatusRequest.class);
+ containerID =
+ recordFactory.newRecordInstance(ContainerId.class);
+ containerID.setAppId(appID);
+ containerID.setId(1);
+ request.setContainerId(containerID);
+ try {
+ client.getContainerStatus(request);
+ fail("Connection initiation with illegally modified "
+ + "tokens is expected to fail.");
+ } catch (YarnRemoteException e) {
+ LOG.error("Got exception", e);
+ fail("Cannot get a YARN remote exception as " +
+ "it will indicate RPC success");
+ } catch (Exception e) {
+ Assert.assertEquals(
+ java.lang.reflect.UndeclaredThrowableException.class
+ .getCanonicalName(), e.getClass().getCanonicalName());
+ Assert
+ .assertEquals(
+ "DIGEST-MD5: digest response format violation. Mismatched response.",
+ e.getCause().getCause().getMessage());
+ }
+ return null;
+ }
+ });
+ }
+}
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf
new file mode 100644
index 0000000..121ac6d
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+[libdefaults]
+ default_realm = APACHE.ORG
+ udp_preference_limit = 1
+ extra_addresses = 127.0.0.1
+[realms]
+ APACHE.ORG = {
+ admin_server = localhost:88
+ kdc = localhost:88
+ }
+[domain_realm]
+ localhost = APACHE.ORG
diff --git a/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/pom.xml
new file mode 100644
index 0000000..1d3e615
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-yarn</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${yarn.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server</artifactId>
+ <name>hadoop-yarn-server</name>
+ <packaging>pom</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <modules>
+ <module>hadoop-yarn-server-common</module>
+ <module>hadoop-yarn-server-nodemanager</module>
+ <module>hadoop-yarn-server-resourcemanager</module>
+ <module>hadoop-yarn-server-tests</module>
+ </modules>
+</project>
diff --git a/hadoop-mapreduce/hadoop-yarn/pom.xml b/hadoop-mapreduce/hadoop-yarn/pom.xml
new file mode 100644
index 0000000..b0e4706
--- /dev/null
+++ b/hadoop-mapreduce/hadoop-yarn/pom.xml
@@ -0,0 +1,405 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn</artifactId>
+ <version>${yarn.version}</version>
+ <packaging>pom</packaging>
+ <name>hadoop-yarn</name>
+ <url>http://hadoop.apache.org/mapreduce</url>
+
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <test.logs>true</test.logs>
+ <test.timeout>600000</test.timeout>
+ <hadoop-common.version>0.23.0-SNAPSHOT</hadoop-common.version>
+ <hadoop-hdfs.version>0.23.0-SNAPSHOT</hadoop-hdfs.version>
+ <yarn.version>1.0-SNAPSHOT</yarn.version>
+ <install.pom>${project.build.directory}/saner-pom.xml</install.pom>
+ <install.file>${install.pom}</install.file>
+ <yarn.basedir>${basedir}</yarn.basedir>
+ </properties>
+
+ <repositories>
+ <repository>
+ <id>repository.jboss.org</id>
+ <url>http://repository.jboss.org/nexus/content/groups/public/</url>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ <repository>
+ <id>apache.snapshots</id>
+ <url>http://repository.apache.org/snapshots</url>
+ <!-- until we move to hadoop-common/hdfs trunk and/or maven 3 -->
+ <!-- cf. MNG-4326 -->
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.avro</groupId>
+ <artifactId>avro</artifactId>
+ <version>1.4.1</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.mortbay.jetty</groupId>
+ <artifactId>jetty</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.ant</groupId>
+ <artifactId>ant</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.jboss.netty</groupId>
+ <artifactId>netty</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.velocity</groupId>
+ <artifactId>velocity</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </exclusion>
+ <exclusion>
+ <artifactId>paranamer-ant</artifactId>
+ <groupId>com.thoughtworks.paranamer</groupId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ <version>2.4.0a</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop-common.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.avro</groupId>
+ <artifactId>avro</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>commons-el</groupId>
+ <artifactId>commons-el</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>tomcat</groupId>
+ <artifactId>jasper-runtime</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>tomcat</groupId>
+ <artifactId>jasper-compiler</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.mortbay.jetty</groupId>
+ <artifactId>jsp-2.1-jetty</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>hsqldb</groupId>
+ <artifactId>hsqldb</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-annotations</artifactId>
+ <version>${hadoop-common.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.8.2</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <version>1.8.5</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop-common.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs</artifactId>
+ <version>${hadoop-hdfs.version}</version>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.google.inject.extensions</groupId>
+ <artifactId>guice-servlet</artifactId>
+ <version>2.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.jboss.netty</groupId>
+ <artifactId>netty</artifactId>
+ <version>3.2.3.Final</version>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <version>1.6.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ <version>1.6.1</version>
+ </dependency>
+ </dependencies>
+
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-api</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ <version>${yarn.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-common</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-nodemanager</artifactId>
+ <version>${yarn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ <version>3.3.1</version>
+ <exclusions>
+ <exclusion>
+ <!-- otherwise seems to drag in junit 3.8.1 via jline -->
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jdmk</groupId>
+ <artifactId>jmxtools</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jmx</groupId>
+ <artifactId>jmxri</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>findbugs-maven-plugin</artifactId>
+ <version>2.3.2</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <!-- pre 2.1 ignores project.build.sourceEncoding -->
+ <version>2.3.2</version>
+ <configuration>
+ <source>1.6</source>
+ <target>1.6</target>
+ </configuration>
+ </plugin>
+ <plugin>
+ <artifactId>maven-clean-plugin</artifactId>
+ <version>2.4.1</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <!-- requires 2.5+ to make system properties work -->
+ <!-- requires 2.7+ to avoid SUREFIRE-640 -->
+ <version>2.7.2</version>
+ <configuration>
+ <failIfNoTests>false</failIfNoTests>
+ <redirectTestOutputToFile>${test.logs}</redirectTestOutputToFile>
+ <forkedProcessTimeoutInSeconds>${test.timeout}</forkedProcessTimeoutInSeconds>
+ <environmentVariables>
+ <JAVA_HOME>${java.home}</JAVA_HOME>
+ </environmentVariables>
+ <systemPropertyVariables>
+ <build.dir>${project.build.directory}</build.dir>
+ <build.output.dir>${project.build.outputDirectory}</build.output.dir>
+ </systemPropertyVariables>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>com.atlassian.maven.plugins</groupId>
+ <artifactId>maven-clover2-plugin</artifactId>
+ <version>3.0.2</version>
+ <configuration>
+ <licenseLocation>/home/y/conf/clover/clover.license</licenseLocation>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <version>1.6</version>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.2</version>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>1.5</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <version>2.3.1</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.3.1</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.1.2</version>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <tarLongFileMode>gnu</tarLongFileMode>
+ <descriptors>
+ <descriptor>assembly/all.xml</descriptor>
+ </descriptors>
+ </configuration>
+ </plugin>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>sanitize-pom</id>
+ <phase>package</phase>
+ <configuration>
+ <target>
+ <echo message="project.build.directory: ${project.build.directory}"/>
+ <copy file="pom.xml" tofile="${install.pom}">
+ <filterchain>
+ <!-- we'll have to wait for ant 1.8.3 for the following
+ <expandproperties>
+ <propertyset regex=".*version$">
+ </propertyset>
+ </expandproperties>
+ until then an even uglier workaround: -->
+ <tokenfilter>
+ <replaceregex pattern="\$\{hadoop-common.version}"
+ replace="${hadoop-common.version}" flags="g"/>
+ <replaceregex pattern="\$\{hadoop-hdfs.version}"
+ replace="${hadoop-hdfs.version}" flags="g"/>
+ <replaceregex pattern="\$\{yarn.version}"
+ replace="${yarn.version}" flags="g"/>
+ </tokenfilter>
+ </filterchain>
+ </copy>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-install-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>install-sanitized-pom</id>
+ <configuration>
+ <file>${install.file}</file>
+ <pomFile>${install.pom}</pomFile>
+ </configuration>
+ <phase>install</phase>
+ <goals>
+ <goal>install-file</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>findbugs-maven-plugin</artifactId>
+ <configuration>
+ <findbugsXmlOutput>true</findbugsXmlOutput>
+ <xmlOutput>true</xmlOutput>
+ <excludeFilterFile>${yarn.basedir}/dev-support/findbugs-exclude.xml</excludeFilterFile>
+ <effort>Max</effort>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+ <profiles>
+ <profile>
+ <id>release</id>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-source-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <!-- avoid warning about recursion -->
+ <goal>jar-no-fork</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+
+ <modules>
+ <module>hadoop-yarn-api</module>
+ <module>hadoop-yarn-common</module>
+ <module>hadoop-yarn-server</module>
+ </modules>
+</project>
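The sanitize-pom execution above compensates for Maven 2 installing the pom with its version properties unexpanded: the Ant tokenfilter rewrites each literal ${...version} token to its resolved value, and install-file then publishes target/saner-pom.xml in place of the raw pom. With the property values declared above, the effect is roughly:

  <!-- in pom.xml as authored -->
  <version>${yarn.version}</version>

  <!-- in target/saner-pom.xml as installed -->
  <version>1.0-SNAPSHOT</version>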
diff --git a/hadoop-mapreduce/ivy.xml b/hadoop-mapreduce/ivy.xml
new file mode 100644
index 0000000..4f67743
--- /dev/null
+++ b/hadoop-mapreduce/ivy.xml
@@ -0,0 +1,147 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}" revision="${version}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Hadoop MapReduce
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="compile" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact"/>
+
+ <!--
+ These public configurations contain the core dependencies for running hadoop client or server.
+ The server is effectively a superset of the client.
+ -->
+ <!--Private configurations. -->
+
+ <conf name="common" visibility="private" extends="compile" description="common artifacts"/>
+ <conf name="mapred" visibility="private" extends="compile,runtime" description="Mapred dependent artifacts"/>
+ <conf name="javadoc" visibility="private" description="artiracts required while performing doc generation" extends="common"/>
+ <conf name="test" extends="master" visibility="private" description="the classpath needed to run tests"/>
+ <conf name="package" extends="master" description="the classpath needed for packaging"/>
+ <conf name="system" extends="test" visibility="private" description="the classpath needed to run system tests"/>
+
+ <conf name="test-hdfswithmr" extends="test" visibility="private" description="the classpath needed to run tests"/>
+
+ <conf name="releaseaudit" visibility="private" description="Artifacts required for releaseaudit target"/>
+
+ <conf name="jdiff" visibility="private" extends="common"/>
+ <conf name="checkstyle" visibility="private"/>
+
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="compile->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common"
+ rev="${hadoop-common.version}" conf="compile->default" />
+ <dependency org="org.apache.hadoop" name="hadoop-common-test"
+ rev="${hadoop-common.version}" conf="compile->default" />
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs"
+ rev="${hadoop-hdfs.version}" conf="compile->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common-instrumented"
+ rev="${hadoop-common.version}" conf="system->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs-instrumented"
+ rev="${hadoop-common.version}" conf="system->default"/>
+ <dependency org="commons-logging" name="commons-logging"
+ rev="${commons-logging.version}" conf="compile->master"/>
+ <!-- <dependency org="log4j" name="log4j" rev="${log4j.version}"
+ conf="compile->master"/>-->
+
+ <dependency org="org.slf4j" name="slf4j-api" rev="${slf4j-api.version}"
+ conf="compile->master"/>
+ <dependency org="org.slf4j" name="slf4j-log4j12"
+ rev="${slf4j-log4j12.version}" conf="mapred->master"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common-test"
+ rev="${hadoop-common.version}" conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
+ rev="${hadoop-hdfs.version}" conf="test->default"/>
+
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-server-common"
+ rev="${yarn.version}" conf="compile->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="compile->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="compile->default"/>
+ <dependency org="log4j" name="log4j" rev="${log4j.version}"
+ conf="compile->master"/>
+
+ <!--
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="test->default">
+ <artifact name="hadoop-yarn-common" type="jar" ext="jar" classifier="tests"/>
+ </dependency>
+ -->
+
+ <dependency org="checkstyle" name="checkstyle" rev="${checkstyle.version}"
+ conf="checkstyle->default"/>
+
+ <dependency org="jdiff" name="jdiff" rev="${jdiff.version}"
+ conf="jdiff->default"/>
+ <dependency org="xerces" name="xerces" rev="${xerces.version}"
+ conf="jdiff->default"/>
+
+ <dependency org="org.apache.rat" name="apache-rat-tasks"
+ rev="${rats-lib.version}" conf="releaseaudit->default"/>
+ <dependency org="commons-lang" name="commons-lang"
+ rev="${commons-lang.version}" conf="releaseaudit->default"/>
+ <dependency org="commons-collections" name="commons-collections"
+ rev="${commons-collections.version}"
+ conf="releaseaudit->default"/>
+
+ <dependency org="org.apache.lucene" name="lucene-core"
+ rev="${lucene-core.version}" conf="javadoc->default"/>
+ <dependency org="org.apache.avro" name="avro" rev="${avro.version}"
+ conf="compile->default">
+ <exclude module="ant"/>
+ <exclude module="jetty"/>
+ <exclude module="slf4j-simple"/>
+ </dependency>
+ <dependency org="junit" name="junit" rev="${junit.version}"
+ conf="test->default"/>
+ <dependency org="org.mockito" name="mockito-all" rev="${mockito-all.version}"
+ conf="test->default"/>
+ <dependency org="org.vafer" name="jdeb" rev="${jdeb.version}" conf="package->master"/>
+ <dependency org="org.mortbay.jetty" name="jetty-servlet-tester" rev="${jetty.version}"
+ conf="test->default"/>
+ <!-- dependency addition for the fault injection -->
+ <dependency org="org.aspectj" name="aspectjrt" rev="${aspectj.version}"
+ conf="compile->default"/>
+ <dependency org="org.aspectj" name="aspectjtools" rev="${aspectj.version}"
+ conf="compile->default"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+ <exclude org="org.apache.hadoop" module="avro"/>
+
+ </dependencies>
+
+</ivy-module>
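A hedged sketch of how an Ant build consumes the configurations declared above through Ivy's antlib, assuming ivy.jar is on Ant's classpath (the target name and path id are illustrative; the real targets live in this module's build.xml):

  <project xmlns:ivy="antlib:org.apache.ivy.ant" name="resolve-example" default="compile-classpath">
    <target name="compile-classpath">
      <!-- resolve the "compile" configuration declared in ivy.xml above -->
      <ivy:resolve file="ivy.xml" conf="compile"/>
      <!-- expose the resolved artifacts to <javac> as an Ant path -->
      <ivy:cachepath pathid="compile.classpath" conf="compile"/>
    </target>
  </project>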
diff --git a/mapreduce/ivy/hadoop-mapred-examples-template.xml b/hadoop-mapreduce/ivy/hadoop-mapred-examples-template.xml
similarity index 100%
rename from mapreduce/ivy/hadoop-mapred-examples-template.xml
rename to hadoop-mapreduce/ivy/hadoop-mapred-examples-template.xml
diff --git a/mapreduce/ivy/hadoop-mapred-instrumented-template.xml b/hadoop-mapreduce/ivy/hadoop-mapred-instrumented-template.xml
similarity index 100%
rename from mapreduce/ivy/hadoop-mapred-instrumented-template.xml
rename to hadoop-mapreduce/ivy/hadoop-mapred-instrumented-template.xml
diff --git a/mapreduce/ivy/hadoop-mapred-instrumented-test-template.xml b/hadoop-mapreduce/ivy/hadoop-mapred-instrumented-test-template.xml
similarity index 100%
rename from mapreduce/ivy/hadoop-mapred-instrumented-test-template.xml
rename to hadoop-mapreduce/ivy/hadoop-mapred-instrumented-test-template.xml
diff --git a/mapreduce/ivy/hadoop-mapred-template.xml b/hadoop-mapreduce/ivy/hadoop-mapred-template.xml
similarity index 100%
rename from mapreduce/ivy/hadoop-mapred-template.xml
rename to hadoop-mapreduce/ivy/hadoop-mapred-template.xml
diff --git a/mapreduce/ivy/hadoop-mapred-test-template.xml b/hadoop-mapreduce/ivy/hadoop-mapred-test-template.xml
similarity index 100%
rename from mapreduce/ivy/hadoop-mapred-test-template.xml
rename to hadoop-mapreduce/ivy/hadoop-mapred-test-template.xml
diff --git a/mapreduce/ivy/hadoop-mapred-tools-template.xml b/hadoop-mapreduce/ivy/hadoop-mapred-tools-template.xml
similarity index 100%
rename from mapreduce/ivy/hadoop-mapred-tools-template.xml
rename to hadoop-mapreduce/ivy/hadoop-mapred-tools-template.xml
diff --git a/mapreduce/ivy/ivysettings.xml b/hadoop-mapreduce/ivy/ivysettings.xml
similarity index 100%
rename from mapreduce/ivy/ivysettings.xml
rename to hadoop-mapreduce/ivy/ivysettings.xml
diff --git a/hadoop-mapreduce/ivy/libraries.properties b/hadoop-mapreduce/ivy/libraries.properties
new file mode 100644
index 0000000..9891dcb
--- /dev/null
+++ b/hadoop-mapreduce/ivy/libraries.properties
@@ -0,0 +1,85 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#This properties file lists the versions of the various artifacts used by Hadoop and its components.
+#It drives Ivy and the generation of a Maven POM.
+
+#These are the versions of our dependencies (in alphabetical order)
+ant-task.version=2.0.10
+
+#Aspectj dependency for fault injection
+#This property has to be updated synchronously with aop.xml
+aspectj.version=1.6.5
+
+avro.version=1.4.1
+
+checkstyle.version=4.2
+
+commons-cli.version=1.2
+commons-collections.version=3.1
+commons-httpclient.version=3.1
+commons-lang.version=2.5
+commons-logging.version=1.1.1
+commons-logging-api.version=1.1
+commons-el.version=1.0
+commons-fileupload.version=1.2
+commons-io.version=1.4
+commons-net.version=1.4.1
+core.version=3.1.1
+coreplugin.version=1.3.2
+
+ftplet-api.version=1.0.0
+ftpserver-core.version=1.0.0
+ftpserver-deprecated.version=1.0.0-M2
+
+hadoop-common.version=0.23.0-SNAPSHOT
+hadoop-hdfs.version=0.23.0-SNAPSHOT
+
+hsqldb.version=1.8.0.10
+
+ivy.version=2.1.0
+
+jasper.version=5.5.12
+jdeb.version=0.8
+jsp.version=2.1
+jsp-api.version=5.5.12
+jets3t.version=0.7.1
+jetty.version=6.1.14
+jetty-util.version=6.1.14
+junit.version=4.8.1
+jdiff.version=1.0.9
+
+kfs.version=0.3
+
+log4j.version=1.2.15
+lucene-core.version=2.3.1
+
+mina-core.version=2.0.0-M5
+
+mockito-all.version=1.8.2
+
+oro.version=2.0.8
+
+rats-lib.version=0.6
+
+servlet.version=4.0.6
+servlet-api-2.5.version=6.1.14
+servlet-api.version=2.5
+slf4j-api.version=1.5.11
+slf4j-log4j12.version=1.5.11
+
+wagon-http.version=1.0-beta-2
+xmlenc.version=0.52
+xerces.version=1.4.4
+
+yarn.version=1.0-SNAPSHOT
+hadoop-mapreduce.version=1.0-SNAPSHOT
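These keys line up with the ${*.version} references in ivy.xml above; the build only has to load this file before resolving. A minimal sketch of that step, assuming an Ant build (illustrative, not part of this patch):

  <!-- load the version table; after this, rev="${avro.version}" in ivy.xml resolves to 1.4.1 -->
  <property file="ivy/libraries.properties"/>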
diff --git a/mapreduce/lib/jdiff/hadoop-mapred_0.20.0.xml b/hadoop-mapreduce/lib/jdiff/hadoop-mapred_0.20.0.xml
similarity index 100%
rename from mapreduce/lib/jdiff/hadoop-mapred_0.20.0.xml
rename to hadoop-mapreduce/lib/jdiff/hadoop-mapred_0.20.0.xml
diff --git a/mapreduce/lib/jdiff/hadoop-mapred_0.21.0.xml b/hadoop-mapreduce/lib/jdiff/hadoop-mapred_0.21.0.xml
similarity index 100%
rename from mapreduce/lib/jdiff/hadoop-mapred_0.21.0.xml
rename to hadoop-mapreduce/lib/jdiff/hadoop-mapred_0.21.0.xml
diff --git a/hadoop-mapreduce/pom.xml b/hadoop-mapreduce/pom.xml
new file mode 100644
index 0000000..f91ec01
--- /dev/null
+++ b/hadoop-mapreduce/pom.xml
@@ -0,0 +1,388 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce</artifactId>
+ <version>${hadoop-mapreduce.version}</version>
+ <packaging>pom</packaging>
+ <name>hadoop-mapreduce</name>
+ <url>http://hadoop.apache.org/mapreduce/</url>
+
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <test.logs>true</test.logs>
+ <test.timeout>600000</test.timeout>
+ <hadoop-common.version>0.23.0-SNAPSHOT</hadoop-common.version>
+ <hadoop-hdfs.version>0.23.0-SNAPSHOT</hadoop-hdfs.version>
+ <hadoop-mapreduce.version>1.0-SNAPSHOT</hadoop-mapreduce.version>
+ <yarn.version>1.0-SNAPSHOT</yarn.version>
+ <install.pom>${project.build.directory}/saner-pom.xml</install.pom>
+ <install.file>${install.pom}</install.file>
+ <fork.mode>once</fork.mode>
+ <mr.basedir>${basedir}</mr.basedir>
+ </properties>
+
+ <repositories>
+ <repository>
+ <id>repository.jboss.org</id>
+ <url>http://repository.jboss.org/nexus/content/groups/public/</url>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ <repository>
+ <id>apache.snapshots</id>
+ <url>http://repository.apache.org/snapshots</url>
+ <!-- until we move to hadoop-common/hdfs trunk and/or maven 3 -->
+ <!-- cf. MNG-4326 -->
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ <version>2.4.0a</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.avro</groupId>
+ <artifactId>avro</artifactId>
+ <version>1.4.1</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.mortbay.jetty</groupId>
+ <artifactId>jetty</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.ant</groupId>
+ <artifactId>ant</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.jboss.netty</groupId>
+ <artifactId>netty</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.velocity</groupId>
+ <artifactId>velocity</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </exclusion>
+ <exclusion>
+ <artifactId>paranamer-ant</artifactId>
+ <groupId>com.thoughtworks.paranamer</groupId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop-common.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>commons-el</groupId>
+ <artifactId>commons-el</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>tomcat</groupId>
+ <artifactId>jasper-runtime</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>tomcat</groupId>
+ <artifactId>jasper-compiler</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.mortbay.jetty</groupId>
+ <artifactId>jsp-2.1-jetty</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>hsqldb</groupId>
+ <artifactId>hsqldb</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <version>1.6.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ <version>1.6.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-annotations</artifactId>
+ <version>${hadoop-common.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <version>1.8.5</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop-common.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <!-- needed for security and runtime -->
+ <artifactId>hadoop-hdfs</artifactId>
+ <version>${hadoop-hdfs.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.inject.extensions</groupId>
+ <artifactId>guice-servlet</artifactId>
+ <version>2.0</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.8.2</version>
+ </dependency>
+ <dependency>
+ <groupId>org.jboss.netty</groupId>
+ <artifactId>netty</artifactId>
+ <version>3.2.3.Final</version>
+ </dependency>
+ <dependency>
+ <groupId>com.cenqua.clover</groupId>
+ <artifactId>clover</artifactId>
+ <version>3.0.2</version>
+ </dependency>
+
+ </dependencies>
+
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>findbugs-maven-plugin</artifactId>
+ <version>2.3.2</version>
+ </plugin>
+ <plugin>
+ <artifactId>maven-clean-plugin</artifactId>
+ <version>2.4.1</version>
+ </plugin>
+ <plugin>
+ <groupId>com.atlassian.maven.plugins</groupId>
+ <artifactId>maven-clover2-plugin</artifactId>
+ <version>3.0.2</version>
+ <configuration>
+ <licenseLocation>/home/y/conf/clover/clover.license</licenseLocation>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <!-- pre 2.1 ignores project.build.sourceEncoding -->
+ <version>2.3.2</version>
+ <configuration>
+ <source>1.6</source>
+ <target>1.6</target>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <!-- requires 2.5+ to make system properties work -->
+ <!-- requires 2.7+ to avoid SUREFIRE-640 -->
+ <version>2.7.2</version>
+ <configuration>
+ <failIfNoTests>false</failIfNoTests>
+ <redirectTestOutputToFile>${test.logs}</redirectTestOutputToFile>
+ <forkedProcessTimeoutInSeconds>${test.timeout}</forkedProcessTimeoutInSeconds>
+ <forkMode>${fork.mode}</forkMode>
+ <environmentVariables>
+ <JAVA_HOME>${java.home}</JAVA_HOME>
+ </environmentVariables>
+ <systemPropertyVariables>
+ <build.dir>${project.build.directory}</build.dir>
+ <build.output.dir>${project.build.outputDirectory}</build.output.dir>
+ <log4j.configuration>file:${mr.basedir}/src/test/log4j.properties</log4j.configuration>
+ </systemPropertyVariables>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>2.2.1</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <version>1.6</version>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.2</version>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>1.5</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <version>2.3.1</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.3.1</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.1.2</version>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>sanitize-pom</id>
+ <phase>package</phase>
+ <configuration>
+ <target>
+ <copy file="pom.xml" tofile="${install.pom}">
+ <filterchain>
+ <!-- we'll have to wait for ant 1.8.3 for the following
+ <expandproperties>
+ <propertyset regex=".*version$">
+ </propertyset>
+ </expandproperties>
+ until then an even uglier workaround: -->
+ <tokenfilter>
+ <replaceregex pattern="\$\{hadoop-common.version}"
+ replace="${hadoop-common.version}" flags="g"/>
+ <replaceregex pattern="\$\{hadoop-hdfs.version}"
+ replace="${hadoop-hdfs.version}" flags="g"/>
+ <replaceregex pattern="\$\{hadoop-mapreduce.version}"
+ replace="${hadoop-mapreduce.version}" flags="g"/>
+ <replaceregex pattern="\$\{yarn.version}"
+ replace="${yarn.version}" flags="g"/>
+ </tokenfilter>
+ </filterchain>
+ </copy>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-install-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>install-sanitized-pom</id>
+ <configuration>
+ <file>${install.file}</file>
+ <pomFile>${install.pom}</pomFile>
+ </configuration>
+ <phase>install</phase>
+ <goals>
+ <goal>install-file</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <tarLongFileMode>gnu</tarLongFileMode>
+ <descriptors>
+ <descriptor>assembly/all.xml</descriptor>
+ </descriptors>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>com.atlassian.maven.plugins</groupId>
+ <artifactId>maven-clover2-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>instrument</goal>
+ <goal>aggregate</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>findbugs-maven-plugin</artifactId>
+ <configuration>
+ <findbugsXmlOutput>true</findbugsXmlOutput>
+ <xmlOutput>true</xmlOutput>
+ <excludeFilterFile>${mr.basedir}/dev-support/findbugs-exclude.xml</excludeFilterFile>
+ <effort>Max</effort>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+ <profiles>
+ <profile>
+ <id>release</id>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-source-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <!-- avoid warning about recursion -->
+ <goal>jar-no-fork</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+
+ <modules>
+ <module>hadoop-yarn</module>
+ <module>hadoop-mr-client</module>
+ </modules>
+
+ <reporting>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>findbugs-maven-plugin</artifactId>
+ <!-- until we have reporting management cf. MSITE-443 -->
+ <version>2.3.2</version>
+ <configuration>
+ <findbugsXmlOutput>true</findbugsXmlOutput>
+ <xmlOutput>true</xmlOutput>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>com.atlassian.maven.plugins</groupId>
+ <artifactId>maven-clover2-plugin</artifactId>
+ <!-- until we have reporting management cf. MSITE-443 -->
+ <version>3.0.2</version>
+ </plugin>
+ </plugins>
+ </reporting>
+</project>
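One usage note on the release profile above: it is activated explicitly, e.g.

mvn install -Prelease

which binds the maven-source-plugin jar-no-fork goal so source jars are attached; the non-forking goal is what avoids the recursion warning noted in the comment.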
diff --git a/mapreduce/src/benchmarks/gridmix/README b/hadoop-mapreduce/src/benchmarks/gridmix/README
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/README
rename to hadoop-mapreduce/src/benchmarks/gridmix/README
diff --git a/mapreduce/src/benchmarks/gridmix/generateData.sh b/hadoop-mapreduce/src/benchmarks/gridmix/generateData.sh
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/generateData.sh
rename to hadoop-mapreduce/src/benchmarks/gridmix/generateData.sh
diff --git a/mapreduce/src/benchmarks/gridmix/gridmix-env b/hadoop-mapreduce/src/benchmarks/gridmix/gridmix-env
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/gridmix-env
rename to hadoop-mapreduce/src/benchmarks/gridmix/gridmix-env
diff --git a/mapreduce/src/benchmarks/gridmix/javasort/text-sort.large b/hadoop-mapreduce/src/benchmarks/gridmix/javasort/text-sort.large
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/javasort/text-sort.large
rename to hadoop-mapreduce/src/benchmarks/gridmix/javasort/text-sort.large
diff --git a/mapreduce/src/benchmarks/gridmix/javasort/text-sort.medium b/hadoop-mapreduce/src/benchmarks/gridmix/javasort/text-sort.medium
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/javasort/text-sort.medium
rename to hadoop-mapreduce/src/benchmarks/gridmix/javasort/text-sort.medium
diff --git a/mapreduce/src/benchmarks/gridmix/javasort/text-sort.small b/hadoop-mapreduce/src/benchmarks/gridmix/javasort/text-sort.small
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/javasort/text-sort.small
rename to hadoop-mapreduce/src/benchmarks/gridmix/javasort/text-sort.small
diff --git a/mapreduce/src/benchmarks/gridmix/maxent/maxent.large b/hadoop-mapreduce/src/benchmarks/gridmix/maxent/maxent.large
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/maxent/maxent.large
rename to hadoop-mapreduce/src/benchmarks/gridmix/maxent/maxent.large
diff --git a/mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.large b/hadoop-mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.large
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.large
rename to hadoop-mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.large
diff --git a/mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.medium b/hadoop-mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.medium
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.medium
rename to hadoop-mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.medium
diff --git a/mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.small b/hadoop-mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.small
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.small
rename to hadoop-mapreduce/src/benchmarks/gridmix/monsterQuery/monster_query.small
diff --git a/mapreduce/src/benchmarks/gridmix/pipesort/text-sort.large b/hadoop-mapreduce/src/benchmarks/gridmix/pipesort/text-sort.large
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/pipesort/text-sort.large
rename to hadoop-mapreduce/src/benchmarks/gridmix/pipesort/text-sort.large
diff --git a/mapreduce/src/benchmarks/gridmix/pipesort/text-sort.medium b/hadoop-mapreduce/src/benchmarks/gridmix/pipesort/text-sort.medium
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/pipesort/text-sort.medium
rename to hadoop-mapreduce/src/benchmarks/gridmix/pipesort/text-sort.medium
diff --git a/mapreduce/src/benchmarks/gridmix/pipesort/text-sort.small b/hadoop-mapreduce/src/benchmarks/gridmix/pipesort/text-sort.small
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/pipesort/text-sort.small
rename to hadoop-mapreduce/src/benchmarks/gridmix/pipesort/text-sort.small
diff --git a/mapreduce/src/benchmarks/gridmix/streamsort/text-sort.large b/hadoop-mapreduce/src/benchmarks/gridmix/streamsort/text-sort.large
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/streamsort/text-sort.large
rename to hadoop-mapreduce/src/benchmarks/gridmix/streamsort/text-sort.large
diff --git a/mapreduce/src/benchmarks/gridmix/streamsort/text-sort.medium b/hadoop-mapreduce/src/benchmarks/gridmix/streamsort/text-sort.medium
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/streamsort/text-sort.medium
rename to hadoop-mapreduce/src/benchmarks/gridmix/streamsort/text-sort.medium
diff --git a/mapreduce/src/benchmarks/gridmix/streamsort/text-sort.small b/hadoop-mapreduce/src/benchmarks/gridmix/streamsort/text-sort.small
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/streamsort/text-sort.small
rename to hadoop-mapreduce/src/benchmarks/gridmix/streamsort/text-sort.small
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/allThroughHod b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/allThroughHod
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/allThroughHod
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/allThroughHod
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/allToSameCluster b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/allToSameCluster
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/allToSameCluster
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/allToSameCluster
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/maxentHod b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/maxentHod
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/maxentHod
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/maxentHod
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/maxentToSameCluster b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/maxentToSameCluster
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/maxentToSameCluster
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/maxentToSameCluster
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/monsterQueriesHod b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/monsterQueriesHod
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/monsterQueriesHod
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/monsterQueriesHod
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/monsterQueriesToSameCluster b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/monsterQueriesToSameCluster
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/monsterQueriesToSameCluster
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/monsterQueriesToSameCluster
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/sleep_if_too_busy b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/sleep_if_too_busy
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/sleep_if_too_busy
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/sleep_if_too_busy
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/textSortHod b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/textSortHod
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/textSortHod
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/textSortHod
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/textSortToSameCluster b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/textSortToSameCluster
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/textSortToSameCluster
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/textSortToSameCluster
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/webdataScanHod b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/webdataScanHod
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/webdataScanHod
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/webdataScanHod
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/webdataScanToSameCluster b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/webdataScanToSameCluster
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/webdataScanToSameCluster
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/webdataScanToSameCluster
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/webdataSortHod b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/webdataSortHod
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/webdataSortHod
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/webdataSortHod
diff --git a/mapreduce/src/benchmarks/gridmix/submissionScripts/webdataSortToSameCluster b/hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/webdataSortToSameCluster
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/submissionScripts/webdataSortToSameCluster
rename to hadoop-mapreduce/src/benchmarks/gridmix/submissionScripts/webdataSortToSameCluster
diff --git a/mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.large b/hadoop-mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.large
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.large
rename to hadoop-mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.large
diff --git a/mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.medium b/hadoop-mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.medium
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.medium
rename to hadoop-mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.medium
diff --git a/mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.small b/hadoop-mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.small
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.small
rename to hadoop-mapreduce/src/benchmarks/gridmix/webdatascan/webdata_scan.small
diff --git a/mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.large b/hadoop-mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.large
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.large
rename to hadoop-mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.large
diff --git a/mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.medium b/hadoop-mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.medium
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.medium
rename to hadoop-mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.medium
diff --git a/mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.small b/hadoop-mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.small
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.small
rename to hadoop-mapreduce/src/benchmarks/gridmix/webdatasort/webdata_sort.small
diff --git a/mapreduce/src/benchmarks/gridmix2/README.gridmix2 b/hadoop-mapreduce/src/benchmarks/gridmix2/README.gridmix2
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix2/README.gridmix2
rename to hadoop-mapreduce/src/benchmarks/gridmix2/README.gridmix2
diff --git a/mapreduce/src/benchmarks/gridmix2/build.xml b/hadoop-mapreduce/src/benchmarks/gridmix2/build.xml
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix2/build.xml
rename to hadoop-mapreduce/src/benchmarks/gridmix2/build.xml
diff --git a/mapreduce/src/benchmarks/gridmix2/generateGridmix2data.sh b/hadoop-mapreduce/src/benchmarks/gridmix2/generateGridmix2data.sh
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix2/generateGridmix2data.sh
rename to hadoop-mapreduce/src/benchmarks/gridmix2/generateGridmix2data.sh
diff --git a/mapreduce/src/benchmarks/gridmix2/gridmix-env-2 b/hadoop-mapreduce/src/benchmarks/gridmix2/gridmix-env-2
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix2/gridmix-env-2
rename to hadoop-mapreduce/src/benchmarks/gridmix2/gridmix-env-2
diff --git a/mapreduce/src/benchmarks/gridmix2/gridmix_config.xml b/hadoop-mapreduce/src/benchmarks/gridmix2/gridmix_config.xml
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix2/gridmix_config.xml
rename to hadoop-mapreduce/src/benchmarks/gridmix2/gridmix_config.xml
diff --git a/mapreduce/src/benchmarks/gridmix2/rungridmix_2 b/hadoop-mapreduce/src/benchmarks/gridmix2/rungridmix_2
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix2/rungridmix_2
rename to hadoop-mapreduce/src/benchmarks/gridmix2/rungridmix_2
diff --git a/mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/CombinerJobCreator.java b/hadoop-mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/CombinerJobCreator.java
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/CombinerJobCreator.java
rename to hadoop-mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/CombinerJobCreator.java
diff --git a/mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/GenericMRLoadJobCreator.java b/hadoop-mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/GenericMRLoadJobCreator.java
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/GenericMRLoadJobCreator.java
rename to hadoop-mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/GenericMRLoadJobCreator.java
diff --git a/mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/GridMixRunner.java b/hadoop-mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/GridMixRunner.java
similarity index 100%
rename from mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/GridMixRunner.java
rename to hadoop-mapreduce/src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/GridMixRunner.java
diff --git a/mapreduce/src/c++/librecordio/Makefile b/hadoop-mapreduce/src/c++/librecordio/Makefile
similarity index 100%
rename from mapreduce/src/c++/librecordio/Makefile
rename to hadoop-mapreduce/src/c++/librecordio/Makefile
diff --git a/mapreduce/src/c++/librecordio/archive.hh b/hadoop-mapreduce/src/c++/librecordio/archive.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/archive.hh
rename to hadoop-mapreduce/src/c++/librecordio/archive.hh
diff --git a/mapreduce/src/c++/librecordio/binarchive.cc b/hadoop-mapreduce/src/c++/librecordio/binarchive.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/binarchive.cc
rename to hadoop-mapreduce/src/c++/librecordio/binarchive.cc
diff --git a/mapreduce/src/c++/librecordio/binarchive.hh b/hadoop-mapreduce/src/c++/librecordio/binarchive.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/binarchive.hh
rename to hadoop-mapreduce/src/c++/librecordio/binarchive.hh
diff --git a/mapreduce/src/c++/librecordio/csvarchive.cc b/hadoop-mapreduce/src/c++/librecordio/csvarchive.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/csvarchive.cc
rename to hadoop-mapreduce/src/c++/librecordio/csvarchive.cc
diff --git a/mapreduce/src/c++/librecordio/csvarchive.hh b/hadoop-mapreduce/src/c++/librecordio/csvarchive.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/csvarchive.hh
rename to hadoop-mapreduce/src/c++/librecordio/csvarchive.hh
diff --git a/mapreduce/src/c++/librecordio/exception.cc b/hadoop-mapreduce/src/c++/librecordio/exception.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/exception.cc
rename to hadoop-mapreduce/src/c++/librecordio/exception.cc
diff --git a/mapreduce/src/c++/librecordio/exception.hh b/hadoop-mapreduce/src/c++/librecordio/exception.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/exception.hh
rename to hadoop-mapreduce/src/c++/librecordio/exception.hh
diff --git a/mapreduce/src/c++/librecordio/fieldTypeInfo.cc b/hadoop-mapreduce/src/c++/librecordio/fieldTypeInfo.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/fieldTypeInfo.cc
rename to hadoop-mapreduce/src/c++/librecordio/fieldTypeInfo.cc
diff --git a/mapreduce/src/c++/librecordio/fieldTypeInfo.hh b/hadoop-mapreduce/src/c++/librecordio/fieldTypeInfo.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/fieldTypeInfo.hh
rename to hadoop-mapreduce/src/c++/librecordio/fieldTypeInfo.hh
diff --git a/mapreduce/src/c++/librecordio/filestream.cc b/hadoop-mapreduce/src/c++/librecordio/filestream.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/filestream.cc
rename to hadoop-mapreduce/src/c++/librecordio/filestream.cc
diff --git a/mapreduce/src/c++/librecordio/filestream.hh b/hadoop-mapreduce/src/c++/librecordio/filestream.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/filestream.hh
rename to hadoop-mapreduce/src/c++/librecordio/filestream.hh
diff --git a/mapreduce/src/c++/librecordio/recordTypeInfo.cc b/hadoop-mapreduce/src/c++/librecordio/recordTypeInfo.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/recordTypeInfo.cc
rename to hadoop-mapreduce/src/c++/librecordio/recordTypeInfo.cc
diff --git a/mapreduce/src/c++/librecordio/recordTypeInfo.hh b/hadoop-mapreduce/src/c++/librecordio/recordTypeInfo.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/recordTypeInfo.hh
rename to hadoop-mapreduce/src/c++/librecordio/recordTypeInfo.hh
diff --git a/mapreduce/src/c++/librecordio/recordio.cc b/hadoop-mapreduce/src/c++/librecordio/recordio.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/recordio.cc
rename to hadoop-mapreduce/src/c++/librecordio/recordio.cc
diff --git a/mapreduce/src/c++/librecordio/recordio.hh b/hadoop-mapreduce/src/c++/librecordio/recordio.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/recordio.hh
rename to hadoop-mapreduce/src/c++/librecordio/recordio.hh
diff --git a/mapreduce/src/c++/librecordio/test/Makefile b/hadoop-mapreduce/src/c++/librecordio/test/Makefile
similarity index 100%
rename from mapreduce/src/c++/librecordio/test/Makefile
rename to hadoop-mapreduce/src/c++/librecordio/test/Makefile
diff --git a/mapreduce/src/c++/librecordio/test/test.cc b/hadoop-mapreduce/src/c++/librecordio/test/test.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/test/test.cc
rename to hadoop-mapreduce/src/c++/librecordio/test/test.cc
diff --git a/mapreduce/src/c++/librecordio/test/test.hh b/hadoop-mapreduce/src/c++/librecordio/test/test.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/test/test.hh
rename to hadoop-mapreduce/src/c++/librecordio/test/test.hh
diff --git a/mapreduce/src/c++/librecordio/test/test.jr b/hadoop-mapreduce/src/c++/librecordio/test/test.jr
similarity index 100%
rename from mapreduce/src/c++/librecordio/test/test.jr
rename to hadoop-mapreduce/src/c++/librecordio/test/test.jr
diff --git a/mapreduce/src/c++/librecordio/test/testFromJava.cc b/hadoop-mapreduce/src/c++/librecordio/test/testFromJava.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/test/testFromJava.cc
rename to hadoop-mapreduce/src/c++/librecordio/test/testFromJava.cc
diff --git a/mapreduce/src/c++/librecordio/test/testFromJava.hh b/hadoop-mapreduce/src/c++/librecordio/test/testFromJava.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/test/testFromJava.hh
rename to hadoop-mapreduce/src/c++/librecordio/test/testFromJava.hh
diff --git a/mapreduce/src/c++/librecordio/typeIDs.cc b/hadoop-mapreduce/src/c++/librecordio/typeIDs.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/typeIDs.cc
rename to hadoop-mapreduce/src/c++/librecordio/typeIDs.cc
diff --git a/mapreduce/src/c++/librecordio/typeIDs.hh b/hadoop-mapreduce/src/c++/librecordio/typeIDs.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/typeIDs.hh
rename to hadoop-mapreduce/src/c++/librecordio/typeIDs.hh
diff --git a/mapreduce/src/c++/librecordio/typeInfo.cc b/hadoop-mapreduce/src/c++/librecordio/typeInfo.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/typeInfo.cc
rename to hadoop-mapreduce/src/c++/librecordio/typeInfo.cc
diff --git a/mapreduce/src/c++/librecordio/typeInfo.hh b/hadoop-mapreduce/src/c++/librecordio/typeInfo.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/typeInfo.hh
rename to hadoop-mapreduce/src/c++/librecordio/typeInfo.hh
diff --git a/mapreduce/src/c++/librecordio/utils.cc b/hadoop-mapreduce/src/c++/librecordio/utils.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/utils.cc
rename to hadoop-mapreduce/src/c++/librecordio/utils.cc
diff --git a/mapreduce/src/c++/librecordio/utils.hh b/hadoop-mapreduce/src/c++/librecordio/utils.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/utils.hh
rename to hadoop-mapreduce/src/c++/librecordio/utils.hh
diff --git a/mapreduce/src/c++/librecordio/xmlarchive.cc b/hadoop-mapreduce/src/c++/librecordio/xmlarchive.cc
similarity index 100%
rename from mapreduce/src/c++/librecordio/xmlarchive.cc
rename to hadoop-mapreduce/src/c++/librecordio/xmlarchive.cc
diff --git a/mapreduce/src/c++/librecordio/xmlarchive.hh b/hadoop-mapreduce/src/c++/librecordio/xmlarchive.hh
similarity index 100%
rename from mapreduce/src/c++/librecordio/xmlarchive.hh
rename to hadoop-mapreduce/src/c++/librecordio/xmlarchive.hh
diff --git a/mapreduce/src/c++/pipes/.autom4te.cfg b/hadoop-mapreduce/src/c++/pipes/.autom4te.cfg
similarity index 100%
rename from mapreduce/src/c++/pipes/.autom4te.cfg
rename to hadoop-mapreduce/src/c++/pipes/.autom4te.cfg
diff --git a/mapreduce/src/c++/pipes/Makefile.am b/hadoop-mapreduce/src/c++/pipes/Makefile.am
similarity index 100%
rename from mapreduce/src/c++/pipes/Makefile.am
rename to hadoop-mapreduce/src/c++/pipes/Makefile.am
diff --git a/mapreduce/src/c++/pipes/api/hadoop/Pipes.hh b/hadoop-mapreduce/src/c++/pipes/api/hadoop/Pipes.hh
similarity index 100%
rename from mapreduce/src/c++/pipes/api/hadoop/Pipes.hh
rename to hadoop-mapreduce/src/c++/pipes/api/hadoop/Pipes.hh
diff --git a/mapreduce/src/c++/pipes/api/hadoop/TemplateFactory.hh b/hadoop-mapreduce/src/c++/pipes/api/hadoop/TemplateFactory.hh
similarity index 100%
rename from mapreduce/src/c++/pipes/api/hadoop/TemplateFactory.hh
rename to hadoop-mapreduce/src/c++/pipes/api/hadoop/TemplateFactory.hh
diff --git a/mapreduce/src/c++/pipes/configure.ac b/hadoop-mapreduce/src/c++/pipes/configure.ac
similarity index 100%
rename from mapreduce/src/c++/pipes/configure.ac
rename to hadoop-mapreduce/src/c++/pipes/configure.ac
diff --git a/mapreduce/src/c++/pipes/debug/pipes-default-gdb-commands.txt b/hadoop-mapreduce/src/c++/pipes/debug/pipes-default-gdb-commands.txt
similarity index 100%
rename from mapreduce/src/c++/pipes/debug/pipes-default-gdb-commands.txt
rename to hadoop-mapreduce/src/c++/pipes/debug/pipes-default-gdb-commands.txt
diff --git a/mapreduce/src/c++/pipes/debug/pipes-default-script b/hadoop-mapreduce/src/c++/pipes/debug/pipes-default-script
similarity index 100%
rename from mapreduce/src/c++/pipes/debug/pipes-default-script
rename to hadoop-mapreduce/src/c++/pipes/debug/pipes-default-script
diff --git a/mapreduce/src/c++/pipes/impl/HadoopPipes.cc b/hadoop-mapreduce/src/c++/pipes/impl/HadoopPipes.cc
similarity index 100%
rename from mapreduce/src/c++/pipes/impl/HadoopPipes.cc
rename to hadoop-mapreduce/src/c++/pipes/impl/HadoopPipes.cc
diff --git a/mapreduce/src/c++/task-controller/.autom4te.cfg b/hadoop-mapreduce/src/c++/task-controller/.autom4te.cfg
similarity index 100%
rename from mapreduce/src/c++/task-controller/.autom4te.cfg
rename to hadoop-mapreduce/src/c++/task-controller/.autom4te.cfg
diff --git a/mapreduce/src/c++/task-controller/Makefile.am b/hadoop-mapreduce/src/c++/task-controller/Makefile.am
similarity index 100%
rename from mapreduce/src/c++/task-controller/Makefile.am
rename to hadoop-mapreduce/src/c++/task-controller/Makefile.am
diff --git a/mapreduce/src/c++/task-controller/configuration.c b/hadoop-mapreduce/src/c++/task-controller/configuration.c
similarity index 100%
rename from mapreduce/src/c++/task-controller/configuration.c
rename to hadoop-mapreduce/src/c++/task-controller/configuration.c
diff --git a/mapreduce/src/c++/task-controller/configuration.h b/hadoop-mapreduce/src/c++/task-controller/configuration.h
similarity index 100%
rename from mapreduce/src/c++/task-controller/configuration.h
rename to hadoop-mapreduce/src/c++/task-controller/configuration.h
diff --git a/mapreduce/src/c++/task-controller/configure.ac b/hadoop-mapreduce/src/c++/task-controller/configure.ac
similarity index 100%
rename from mapreduce/src/c++/task-controller/configure.ac
rename to hadoop-mapreduce/src/c++/task-controller/configure.ac
diff --git a/mapreduce/src/c++/task-controller/main.c b/hadoop-mapreduce/src/c++/task-controller/main.c
similarity index 100%
rename from mapreduce/src/c++/task-controller/main.c
rename to hadoop-mapreduce/src/c++/task-controller/main.c
diff --git a/mapreduce/src/c++/task-controller/task-controller.c b/hadoop-mapreduce/src/c++/task-controller/task-controller.c
similarity index 100%
rename from mapreduce/src/c++/task-controller/task-controller.c
rename to hadoop-mapreduce/src/c++/task-controller/task-controller.c
diff --git a/mapreduce/src/c++/task-controller/task-controller.h b/hadoop-mapreduce/src/c++/task-controller/task-controller.h
similarity index 100%
rename from mapreduce/src/c++/task-controller/task-controller.h
rename to hadoop-mapreduce/src/c++/task-controller/task-controller.h
diff --git a/mapreduce/src/c++/task-controller/tests/test-task-controller.c b/hadoop-mapreduce/src/c++/task-controller/tests/test-task-controller.c
similarity index 100%
rename from mapreduce/src/c++/task-controller/tests/test-task-controller.c
rename to hadoop-mapreduce/src/c++/task-controller/tests/test-task-controller.c
diff --git a/mapreduce/src/c++/utils/.autom4te.cfg b/hadoop-mapreduce/src/c++/utils/.autom4te.cfg
similarity index 100%
rename from mapreduce/src/c++/utils/.autom4te.cfg
rename to hadoop-mapreduce/src/c++/utils/.autom4te.cfg
diff --git a/mapreduce/src/c++/utils/Makefile.am b/hadoop-mapreduce/src/c++/utils/Makefile.am
similarity index 100%
rename from mapreduce/src/c++/utils/Makefile.am
rename to hadoop-mapreduce/src/c++/utils/Makefile.am
diff --git a/mapreduce/src/c++/utils/api/hadoop/SerialUtils.hh b/hadoop-mapreduce/src/c++/utils/api/hadoop/SerialUtils.hh
similarity index 100%
rename from mapreduce/src/c++/utils/api/hadoop/SerialUtils.hh
rename to hadoop-mapreduce/src/c++/utils/api/hadoop/SerialUtils.hh
diff --git a/mapreduce/src/c++/utils/api/hadoop/StringUtils.hh b/hadoop-mapreduce/src/c++/utils/api/hadoop/StringUtils.hh
similarity index 100%
rename from mapreduce/src/c++/utils/api/hadoop/StringUtils.hh
rename to hadoop-mapreduce/src/c++/utils/api/hadoop/StringUtils.hh
diff --git a/mapreduce/src/c++/utils/configure.ac b/hadoop-mapreduce/src/c++/utils/configure.ac
similarity index 100%
rename from mapreduce/src/c++/utils/configure.ac
rename to hadoop-mapreduce/src/c++/utils/configure.ac
diff --git a/mapreduce/src/c++/utils/impl/SerialUtils.cc b/hadoop-mapreduce/src/c++/utils/impl/SerialUtils.cc
similarity index 100%
rename from mapreduce/src/c++/utils/impl/SerialUtils.cc
rename to hadoop-mapreduce/src/c++/utils/impl/SerialUtils.cc
diff --git a/mapreduce/src/c++/utils/impl/StringUtils.cc b/hadoop-mapreduce/src/c++/utils/impl/StringUtils.cc
similarity index 100%
rename from mapreduce/src/c++/utils/impl/StringUtils.cc
rename to hadoop-mapreduce/src/c++/utils/impl/StringUtils.cc
diff --git a/mapreduce/src/c++/utils/m4/hadoop_utils.m4 b/hadoop-mapreduce/src/c++/utils/m4/hadoop_utils.m4
similarity index 100%
rename from mapreduce/src/c++/utils/m4/hadoop_utils.m4
rename to hadoop-mapreduce/src/c++/utils/m4/hadoop_utils.m4
diff --git a/mapreduce/src/contrib/block_forensics/README b/hadoop-mapreduce/src/contrib/block_forensics/README
similarity index 100%
rename from mapreduce/src/contrib/block_forensics/README
rename to hadoop-mapreduce/src/contrib/block_forensics/README
diff --git a/mapreduce/src/contrib/block_forensics/build.xml b/hadoop-mapreduce/src/contrib/block_forensics/build.xml
similarity index 100%
rename from mapreduce/src/contrib/block_forensics/build.xml
rename to hadoop-mapreduce/src/contrib/block_forensics/build.xml
diff --git a/mapreduce/src/contrib/block_forensics/client/BlockForensics.java b/hadoop-mapreduce/src/contrib/block_forensics/client/BlockForensics.java
similarity index 100%
rename from mapreduce/src/contrib/block_forensics/client/BlockForensics.java
rename to hadoop-mapreduce/src/contrib/block_forensics/client/BlockForensics.java
diff --git a/hadoop-mapreduce/src/contrib/block_forensics/ivy.xml b/hadoop-mapreduce/src/contrib/block_forensics/ivy.xml
new file mode 100644
index 0000000..bb9f571
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/block_forensics/ivy.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Apache Hadoop
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private"
+ extends="runtime"
+ description="artifacts needed to compile/test the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-common"
+ rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ </dependencies>
+</ivy-module>
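
[Aside on the Ivy syntax used throughout these new files: conf="common->default" follows Ivy's local->remote mapping, where the left side names a configuration declared in this module and the right side names the configuration requested from the dependency. A minimal illustrative Ant fragment for resolving such a module follows — the ivy: tasks are standard Apache Ivy, but the target name and path id are assumptions for the example, not part of this commit:

    <target name="resolve" xmlns:ivy="antlib:org.apache.ivy.ant">
      <!-- resolve only the "common" configuration declared in ivy.xml -->
      <ivy:resolve file="ivy.xml" conf="common"/>
      <!-- expose the resolved jars as an Ant path for javac/junit -->
      <ivy:cachepath pathid="contrib.classpath" conf="common"/>
    </target>
]
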
diff --git a/mapreduce/src/contrib/block_forensics/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/block_forensics/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/block_forensics/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/block_forensics/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/block_forensics/src/java/org/apache/hadoop/blockforensics/BlockSearch.java b/hadoop-mapreduce/src/contrib/block_forensics/src/java/org/apache/hadoop/blockforensics/BlockSearch.java
similarity index 100%
rename from mapreduce/src/contrib/block_forensics/src/java/org/apache/hadoop/blockforensics/BlockSearch.java
rename to hadoop-mapreduce/src/contrib/block_forensics/src/java/org/apache/hadoop/blockforensics/BlockSearch.java
diff --git a/mapreduce/src/contrib/build-contrib.xml b/hadoop-mapreduce/src/contrib/build-contrib.xml
similarity index 100%
rename from mapreduce/src/contrib/build-contrib.xml
rename to hadoop-mapreduce/src/contrib/build-contrib.xml
diff --git a/mapreduce/src/contrib/build.xml b/hadoop-mapreduce/src/contrib/build.xml
similarity index 100%
rename from mapreduce/src/contrib/build.xml
rename to hadoop-mapreduce/src/contrib/build.xml
diff --git a/mapreduce/src/contrib/capacity-scheduler/README b/hadoop-mapreduce/src/contrib/capacity-scheduler/README
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/README
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/README
diff --git a/mapreduce/src/contrib/capacity-scheduler/build.xml b/hadoop-mapreduce/src/contrib/capacity-scheduler/build.xml
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/build.xml
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/build.xml
diff --git a/hadoop-mapreduce/src/contrib/capacity-scheduler/ivy.xml b/hadoop-mapreduce/src/contrib/capacity-scheduler/ivy.xml
new file mode 100644
index 0000000..4dd7e96
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/capacity-scheduler/ivy.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Apache Hadoop
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private"
+ extends="runtime"
+ description="artifacts needed to compile/test the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common"
+ rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common-test"
+ rev="${hadoop-common.version}" conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs"
+ rev="${hadoop-hdfs.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
+ rev="${hadoop-hdfs.version}" conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="commons-cli" name="commons-cli"
+ rev="${commons-cli.version}" conf="common->default"/>
+ <dependency org="commons-logging" name="commons-logging"
+ rev="${commons-logging.version}" conf="common->default"/>
+ <dependency org="junit" name="junit"
+ rev="${junit.version}" conf="common->default"/>
+ <dependency org="log4j" name="log4j"
+ rev="${log4j.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="jetty-util"
+ rev="${jetty-util.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="jetty"
+ rev="${jetty.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="jsp-api-2.1"
+ rev="${jetty.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="jsp-2.1"
+ rev="${jetty.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="servlet-api-2.5"
+ rev="${servlet-api-2.5.version}" conf="common->master"/>
+ <dependency org="commons-httpclient" name="commons-httpclient"
+ rev="${commons-httpclient.version}" conf="common->master"/>
+ <dependency org="org.apache.avro" name="avro"
+ rev="${avro.version}" conf="common->default">
+ <exclude module="ant"/>
+ <exclude module="jetty"/>
+ <exclude module="slf4j-simple"/>
+ </dependency>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
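
[Two exclusion scopes appear in this file: the <exclude module="..."/> elements nested inside the avro dependency prune only avro's transitive graph, while the trailing <exclude org="..."/> elements at the end of <dependencies> apply to every dependency in the module. An illustrative per-dependency equivalent of one of the module-wide excludes, as a sketch rather than part of the commit:

    <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master">
      <!-- same effect as the module-wide exclude, scoped to log4j only -->
      <exclude org="javax.jms"/>
    </dependency>
]
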
diff --git a/mapreduce/src/contrib/capacity-scheduler/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/capacity-scheduler/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/mapred-queues.xml.template b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/mapred-queues.xml.template
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/mapred-queues.xml.template
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/mapred-queues.xml.template
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/AbstractQueue.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/AbstractQueue.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/AbstractQueue.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/AbstractQueue.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacitySchedulerConf.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacitySchedulerConf.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacitySchedulerConf.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacitySchedulerConf.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/ContainerQueue.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/ContainerQueue.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/ContainerQueue.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/ContainerQueue.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobInitializationPoller.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobInitializationPoller.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobInitializationPoller.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobInitializationPoller.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueue.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueue.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueue.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueue.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueuesManager.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueuesManager.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueuesManager.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueuesManager.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/QueueHierarchyBuilder.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/QueueHierarchyBuilder.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/QueueHierarchyBuilder.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/QueueHierarchyBuilder.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/QueueSchedulingContext.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/QueueSchedulingContext.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/QueueSchedulingContext.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/QueueSchedulingContext.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskDataView.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskDataView.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskDataView.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskDataView.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskSchedulingContext.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskSchedulingContext.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskSchedulingContext.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskSchedulingContext.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/CapacityTestUtils.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/CapacityTestUtils.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/CapacityTestUtils.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/CapacityTestUtils.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ClusterWithCapacityScheduler.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ClusterWithCapacityScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ClusterWithCapacityScheduler.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ClusterWithCapacityScheduler.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerConf.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerConf.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerConf.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerConf.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerWithJobTracker.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerWithJobTracker.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerWithJobTracker.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerWithJobTracker.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestContainerQueue.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestContainerQueue.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestContainerQueue.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestContainerQueue.java
diff --git a/mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestRefreshOfQueues.java b/hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestRefreshOfQueues.java
similarity index 100%
rename from mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestRefreshOfQueues.java
rename to hadoop-mapreduce/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestRefreshOfQueues.java
diff --git a/mapreduce/src/contrib/data_join/build.xml b/hadoop-mapreduce/src/contrib/data_join/build.xml
similarity index 100%
rename from mapreduce/src/contrib/data_join/build.xml
rename to hadoop-mapreduce/src/contrib/data_join/build.xml
diff --git a/hadoop-mapreduce/src/contrib/data_join/ivy.xml b/hadoop-mapreduce/src/contrib/data_join/ivy.xml
new file mode 100644
index 0000000..5ccc695
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/data_join/ivy.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Apache Hadoop
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private"
+ extends="runtime"
+ description="artifacts needed to compile the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common-test" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs" rev="${hadoop-hdfs.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs-test" rev="${hadoop-hdfs.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->default"/>
+ <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
+ <dependency org="junit" name="junit" rev="${junit.version}" conf="common->default"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
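
[The ${hadoop-common.version}, ${yarn.version}, and similar revision placeholders are not defined in these ivy.xml files themselves; they are Ant properties that the contrib build loads before resolving (each contrib's ivy/libraries.properties is renamed alongside its ivy.xml, as just below). A one-line sketch of how such properties could reach Ivy — the exact load order lives in build-contrib.xml and is assumed here:

    <!-- e.g. libraries.properties contains lines like: yarn.version=1.0-SNAPSHOT -->
    <property file="ivy/libraries.properties"/>
]
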
diff --git a/mapreduce/src/contrib/data_join/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/data_join/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/data_join/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/data_join/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/README.txt b/hadoop-mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/README.txt
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/README.txt
rename to hadoop-mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/README.txt
diff --git a/mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java b/hadoop-mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java
rename to hadoop-mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java
diff --git a/mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java b/hadoop-mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java
rename to hadoop-mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java
diff --git a/mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java b/hadoop-mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java
rename to hadoop-mapreduce/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java
diff --git a/mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java b/hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java
rename to hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java
diff --git a/mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java b/hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
rename to hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
diff --git a/mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java b/hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
rename to hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
diff --git a/mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java b/hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
rename to hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
diff --git a/mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java b/hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java
rename to hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java
diff --git a/mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java b/hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java
rename to hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java
diff --git a/mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java b/hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java
rename to hadoop-mapreduce/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java
diff --git a/mapreduce/src/contrib/data_join/src/test/org/apache/hadoop/contrib/utils/join/TestDataJoin.java b/hadoop-mapreduce/src/contrib/data_join/src/test/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
similarity index 100%
rename from mapreduce/src/contrib/data_join/src/test/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
rename to hadoop-mapreduce/src/contrib/data_join/src/test/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/README b/hadoop-mapreduce/src/contrib/dynamic-scheduler/README
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/README
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/README
diff --git a/mapreduce/src/contrib/dynamic-scheduler/build.xml b/hadoop-mapreduce/src/contrib/dynamic-scheduler/build.xml
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/build.xml
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/build.xml
diff --git a/hadoop-mapreduce/src/contrib/dynamic-scheduler/ivy.xml b/hadoop-mapreduce/src/contrib/dynamic-scheduler/ivy.xml
new file mode 100644
index 0000000..4c67461
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/dynamic-scheduler/ivy.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description> Apache Hadoop contrib </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private" extends="runtime" description="common artifacts"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <artifact conf="master"/>
+ </publications>
+
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.mortbay.jetty" name="jetty" rev="${jetty.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="jetty-util" rev="${jetty-util.version}" conf="common->master"/>
+ <dependency org="tomcat" name="jasper-runtime" rev="${jasper.version}" conf="common->master"/>
+ <dependency org="tomcat" name="jasper-compiler" rev="${jasper.version}" conf="common->master"/>
+ <dependency org="commons-el" name="commons-el" rev="${commons-el.version}" conf="common->master"/>
+ <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->master"/>
+ <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
+ <dependency org="net.java.dev.jets3t" name="jets3t" rev="${jets3t.version}" conf="common->master"/>
+ <dependency org="commons-net" name="commons-net" rev="${commons-net.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="servlet-api-2.5" rev="${servlet-api-2.5.version}" conf="common->master"/>
+ <dependency org="junit" name="junit" rev="${junit.version}" conf="common->default"/>
+ <dependency org="org.slf4j" name="slf4j-api" rev="${slf4j-api.version}" conf="common->master"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
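
[Note the split between conf="common->default" and conf="common->master" across these four files: mapping to a dependency's master configuration fetches only its published artifact, while mapping to default also pulls in its runtime dependency closure — matching the conf definitions at the top of each file ("contains the artifact but no dependencies" vs. default extending master,runtime). Illustrative contrast on a single artifact, a sketch rather than part of the commit:

    <!-- jar only, no transitive dependencies -->
    <dependency org="commons-logging" name="commons-logging"
                rev="${commons-logging.version}" conf="common->master"/>
    <!-- jar plus its runtime dependency graph -->
    <dependency org="commons-logging" name="commons-logging"
                rev="${commons-logging.version}" conf="common->default"/>

The document itself uses both forms for this artifact: data_join maps commons-logging to default, dynamic-scheduler to master.]
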
diff --git a/mapreduce/src/contrib/dynamic-scheduler/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/dynamic-scheduler/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/AllocationStore.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/AllocationStore.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/AllocationStore.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/AllocationStore.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/BudgetQueue.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/BudgetQueue.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/BudgetQueue.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/BudgetQueue.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/DynamicPriorityScheduler.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/DynamicPriorityScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/DynamicPriorityScheduler.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/DynamicPriorityScheduler.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/DynamicPriorityServlet.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/DynamicPriorityServlet.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/DynamicPriorityServlet.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/DynamicPriorityServlet.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/FileAllocationStore.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/FileAllocationStore.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/FileAllocationStore.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/FileAllocationStore.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PriorityAuthorization.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PriorityAuthorization.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PriorityAuthorization.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PriorityAuthorization.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PriorityScheduler.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PriorityScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PriorityScheduler.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PriorityScheduler.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PrioritySchedulerOptions.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PrioritySchedulerOptions.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PrioritySchedulerOptions.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/PrioritySchedulerOptions.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueAllocation.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueAllocation.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueAllocation.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueAllocation.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueAllocator.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueAllocator.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueAllocator.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueAllocator.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueTaskScheduler.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueTaskScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueTaskScheduler.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/java/org/apache/hadoop/mapred/QueueTaskScheduler.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/BaseSchedulerTest.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/BaseSchedulerTest.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/BaseSchedulerTest.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/BaseSchedulerTest.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/FakeDynamicScheduler.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/FakeDynamicScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/FakeDynamicScheduler.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/FakeDynamicScheduler.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/TestDynamicScheduler.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/TestDynamicScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/TestDynamicScheduler.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/TestDynamicScheduler.java
diff --git a/mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/TestPriorityScheduler.java b/hadoop-mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/TestPriorityScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/TestPriorityScheduler.java
rename to hadoop-mapreduce/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/TestPriorityScheduler.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/.classpath b/hadoop-mapreduce/src/contrib/eclipse-plugin/.classpath
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/.classpath
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/.classpath
diff --git a/mapreduce/src/contrib/eclipse-plugin/.project b/hadoop-mapreduce/src/contrib/eclipse-plugin/.project
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/.project
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/.project
diff --git a/mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.core.prefs b/hadoop-mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.core.prefs
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.core.prefs
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.core.prefs
diff --git a/mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.ui.prefs b/hadoop-mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.ui.prefs
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.ui.prefs
diff --git a/mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.wst.validation.prefs b/hadoop-mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.wst.validation.prefs
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.wst.validation.prefs
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/.settings/org.eclipse.wst.validation.prefs
diff --git a/mapreduce/src/contrib/eclipse-plugin/META-INF/MANIFEST.MF b/hadoop-mapreduce/src/contrib/eclipse-plugin/META-INF/MANIFEST.MF
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/META-INF/MANIFEST.MF
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/META-INF/MANIFEST.MF
diff --git a/mapreduce/src/contrib/eclipse-plugin/build.properties b/hadoop-mapreduce/src/contrib/eclipse-plugin/build.properties
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/build.properties
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/build.properties
diff --git a/mapreduce/src/contrib/eclipse-plugin/build.xml b/hadoop-mapreduce/src/contrib/eclipse-plugin/build.xml
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/build.xml
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/build.xml
diff --git a/mapreduce/src/contrib/eclipse-plugin/ivy.xml b/hadoop-mapreduce/src/contrib/eclipse-plugin/ivy.xml
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/ivy.xml
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/ivy.xml
diff --git a/mapreduce/src/contrib/eclipse-plugin/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/eclipse-plugin/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/eclipse-plugin/plugin.xml b/hadoop-mapreduce/src/contrib/eclipse-plugin/plugin.xml
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/plugin.xml
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/plugin.xml
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Components/Conf.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Conf.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Components/Conf.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Conf.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Components/Export.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Export.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Components/Export.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Export.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Components/Import.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Import.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Components/Import.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Import.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Components/New.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/New.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Components/New.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/New.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Components/Reload.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Reload.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Components/Reload.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Reload.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Components/Tool.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Tool.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Components/Tool.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Tool.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Components/Tools.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Tools.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Components/Tools.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Components/Tools.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/ConnectDFS.xml b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/ConnectDFS.xml
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/ConnectDFS.xml
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/ConnectDFS.xml
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/CreateProj.xml b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/CreateProj.xml
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/CreateProj.xml
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/CreateProj.xml
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant-16x16.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-16x16.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant-16x16.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-16x16.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant-24x24.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-24x24.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant-24x24.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-24x24.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant-32x32.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-32x32.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant-32x32.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-32x32.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant-64x64.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-64x64.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant-64x64.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-64x64.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant-small-16x16.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-small-16x16.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant-small-16x16.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant-small-16x16.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant.jpg b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant.jpg
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant.jpg
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant.jpg
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant100x100.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant100x100.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant100x100.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant100x100.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant16x16.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant16x16.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant16x16.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant16x16.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-136x136.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-136x136.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-136x136.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-136x136.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-16x16.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-16x16.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-16x16.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-16x16.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-24x24.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-24x24.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-24x24.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-24x24.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-32x32.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-32x32.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-32x32.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-32x32.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-64x64.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-64x64.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-64x64.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2-64x64.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant2.jpg b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2.jpg
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant2.jpg
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant2.jpg
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-122x122.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-122x122.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-122x122.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-122x122.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-16x16.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-16x16.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-16x16.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-16x16.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-24x24.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-24x24.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-24x24.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Elephant3-24x24.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/HelloWorld.xml b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/HelloWorld.xml
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/HelloWorld.xml
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/HelloWorld.xml
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/MAP100x100.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/MAP100x100.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/MAP100x100.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/MAP100x100.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/MAP16x15.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/MAP16x15.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/MAP16x15.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/MAP16x15.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/RunProj.xml b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/RunProj.xml
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/RunProj.xml
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/RunProj.xml
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/SetHadoopPath.xml b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/SetHadoopPath.xml
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/SetHadoopPath.xml
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/SetHadoopPath.xml
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/Setup.xml b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Setup.xml
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/Setup.xml
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/Setup.xml
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/download.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/download.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/download.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/download.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/drive100x100.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/drive100x100.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/drive100x100.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/drive100x100.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/drive16x16.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/drive16x16.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/drive16x16.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/drive16x16.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/driver.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/driver.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/driver.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/driver.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/driverwiz.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/driverwiz.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/driverwiz.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/driverwiz.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/elephantblue16x16.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/elephantblue16x16.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/elephantblue16x16.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/elephantblue16x16.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/files.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/files.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/files.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/files.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-16x16.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-16x16.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-16x16.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-16x16.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-24x24.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-24x24.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-24x24.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-24x24.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-85x85.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-85x85.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-85x85.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo-85x85.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo.jpg b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo.jpg
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo.jpg
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop-logo.jpg
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/hadoop.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/hadoop.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/hadoop_small.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop_small.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/hadoop_small.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/hadoop_small.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/job.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/job.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/job.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/job.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/location-edit-16x16.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/location-edit-16x16.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/location-edit-16x16.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/location-edit-16x16.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/location-new-16x16.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/location-new-16x16.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/location-new-16x16.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/location-new-16x16.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/map16x16.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/map16x16.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/map16x16.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/map16x16.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/mapper16.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/mapper16.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/mapper16.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/mapper16.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/mapwiz.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/mapwiz.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/mapwiz.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/mapwiz.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/new-folder.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/new-folder.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/new-folder.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/new-folder.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/projwiz.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/projwiz.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/projwiz.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/projwiz.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/reduce100x100.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reduce100x100.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/reduce100x100.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reduce100x100.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/reduce16x16.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reduce16x16.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/reduce16x16.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reduce16x16.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/reducer-16x16.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reducer-16x16.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/reducer-16x16.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reducer-16x16.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/reducer16.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reducer16.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/reducer16.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reducer16.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/reducewiz.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reducewiz.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/reducewiz.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/reducewiz.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/refresh.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/refresh.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/refresh.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/refresh.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/spite_overcloud.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/spite_overcloud.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/spite_overcloud.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/spite_overcloud.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/spitesmall.gif b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/spitesmall.gif
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/spitesmall.gif
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/spitesmall.gif
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/spitesmall.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/spitesmall.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/spitesmall.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/spitesmall.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/resources/upload.png b/hadoop-mapreduce/src/contrib/eclipse-plugin/resources/upload.png
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/resources/upload.png
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/resources/upload.png
Binary files differ
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/Activator.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/Activator.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/Activator.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/Activator.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ErrorMessageDialog.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ErrorMessageDialog.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ErrorMessageDialog.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ErrorMessageDialog.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/HadoopPerspectiveFactory.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/HadoopPerspectiveFactory.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/HadoopPerspectiveFactory.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/HadoopPerspectiveFactory.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ImageLibrary.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ImageLibrary.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ImageLibrary.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ImageLibrary.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/MapReduceNature.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/MapReduceNature.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/MapReduceNature.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/MapReduceNature.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizard.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizard.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizard.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizard.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizardPage.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizardPage.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizardPage.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizardPage.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapReduceProjectWizard.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapReduceProjectWizard.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapReduceProjectWizard.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapReduceProjectWizard.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapperWizard.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapperWizard.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapperWizard.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapperWizard.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewReducerWizard.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewReducerWizard.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewReducerWizard.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewReducerWizard.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/PropertyTester.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/PropertyTester.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/PropertyTester.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/PropertyTester.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/DFSActionImpl.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/DFSActionImpl.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/DFSActionImpl.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/DFSActionImpl.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/EditLocationAction.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/EditLocationAction.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/EditLocationAction.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/EditLocationAction.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/NewLocationAction.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/NewLocationAction.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/NewLocationAction.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/NewLocationAction.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRClassWizardAction.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRClassWizardAction.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRClassWizardAction.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRClassWizardAction.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRProjectAction.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRProjectAction.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRProjectAction.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRProjectAction.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ActionProvider.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ActionProvider.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ActionProvider.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ActionProvider.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSActions.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSActions.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSActions.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSActions.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContent.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContent.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContent.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContent.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContentProvider.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContentProvider.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContentProvider.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContentProvider.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFile.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFile.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFile.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFile.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFolder.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFolder.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFolder.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFolder.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocation.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocation.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocation.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocation.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocationsRoot.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocationsRoot.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocationsRoot.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocationsRoot.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSMessage.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSMessage.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSMessage.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSMessage.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSPath.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSPath.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSPath.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSPath.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/LocalMapReduceLaunchTabGroup.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/LocalMapReduceLaunchTabGroup.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/LocalMapReduceLaunchTabGroup.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/LocalMapReduceLaunchTabGroup.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/MutexRule.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/MutexRule.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/MutexRule.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/MutexRule.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/StartHadoopLaunchTabGroup.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/StartHadoopLaunchTabGroup.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/StartHadoopLaunchTabGroup.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/StartHadoopLaunchTabGroup.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/MapReducePreferencePage.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/MapReducePreferencePage.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/MapReducePreferencePage.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/MapReducePreferencePage.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceConstants.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceConstants.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceConstants.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceConstants.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceInitializer.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceInitializer.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceInitializer.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceInitializer.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopPathPage.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopPathPage.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopPathPage.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopPathPage.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java
diff --git a/mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java b/hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java
similarity index 100%
rename from mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java
rename to hadoop-mapreduce/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java
diff --git a/mapreduce/src/contrib/fairscheduler/README b/hadoop-mapreduce/src/contrib/fairscheduler/README
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/README
rename to hadoop-mapreduce/src/contrib/fairscheduler/README
diff --git a/mapreduce/src/contrib/fairscheduler/build.xml b/hadoop-mapreduce/src/contrib/fairscheduler/build.xml
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/build.xml
rename to hadoop-mapreduce/src/contrib/fairscheduler/build.xml
diff --git a/mapreduce/src/contrib/fairscheduler/designdoc/fair_scheduler_design_doc.pdf b/hadoop-mapreduce/src/contrib/fairscheduler/designdoc/fair_scheduler_design_doc.pdf
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/designdoc/fair_scheduler_design_doc.pdf
rename to hadoop-mapreduce/src/contrib/fairscheduler/designdoc/fair_scheduler_design_doc.pdf
Binary files differ
diff --git a/mapreduce/src/contrib/fairscheduler/designdoc/fair_scheduler_design_doc.tex b/hadoop-mapreduce/src/contrib/fairscheduler/designdoc/fair_scheduler_design_doc.tex
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/designdoc/fair_scheduler_design_doc.tex
rename to hadoop-mapreduce/src/contrib/fairscheduler/designdoc/fair_scheduler_design_doc.tex
diff --git a/hadoop-mapreduce/src/contrib/fairscheduler/ivy.xml b/hadoop-mapreduce/src/contrib/fairscheduler/ivy.xml
new file mode 100644
index 0000000..250bcad
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/fairscheduler/ivy.xml
@@ -0,0 +1,116 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Apache Hadoop contrib
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private"
+ description="artifacts needed to compile/test the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common"
+ rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common-test"
+ rev="${hadoop-common.version}" conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs"
+ rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
+ rev="${hadoop-common.version}" conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="commons-logging"
+ name="commons-logging"
+ rev="${commons-logging.version}"
+ conf="common->default"/>
+ <dependency org="log4j"
+ name="log4j"
+ rev="${log4j.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="servlet-api-2.5"
+ rev="${servlet-api-2.5.version}"
+ conf="common->default"/>
+ <dependency org="junit"
+ name="junit"
+ rev="${junit.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.avro"
+ name="avro"
+ rev="${avro.version}"
+ conf="common->default">
+ <exclude module="ant"/>
+ <exclude module="jetty"/>
+ <exclude module="slf4j-simple"/>
+ </dependency>
+ <dependency org="org.codehaus.jackson"
+ name="jackson-mapper-asl"
+ rev="${jackson.version}"
+ conf="common->default"/>
+ <dependency org="com.thoughtworks.paranamer"
+ name="paranamer"
+ rev="${paranamer.version}"
+ conf="common->default"/>
+ <dependency org="com.thoughtworks.paranamer"
+ name="paranamer-ant"
+ rev="${paranamer.version}"
+ conf="common->default"/>
+ <dependency org="org.mortbay.jetty"
+ name="jetty-util"
+ rev="${jetty-util.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jetty"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jsp-api-2.1"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jsp-2.1"
+ rev="${jetty.version}"
+ conf="common->master"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
diff --git a/mapreduce/src/contrib/fairscheduler/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/fairscheduler/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/fairscheduler/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/AllocationConfigurationException.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/AllocationConfigurationException.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/AllocationConfigurationException.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/AllocationConfigurationException.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/CapBasedLoadManager.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/CapBasedLoadManager.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/CapBasedLoadManager.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/CapBasedLoadManager.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/DefaultTaskSelector.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/DefaultTaskSelector.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/DefaultTaskSelector.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/DefaultTaskSelector.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerEventLog.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerEventLog.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerEventLog.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerEventLog.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FifoJobComparator.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FifoJobComparator.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FifoJobComparator.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FifoJobComparator.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulable.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulable.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulable.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulable.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LoadManager.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LoadManager.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LoadManager.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LoadManager.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LocalityLevel.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LocalityLevel.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LocalityLevel.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LocalityLevel.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/NewJobWeightBooster.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/NewJobWeightBooster.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/NewJobWeightBooster.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/NewJobWeightBooster.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Pool.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Pool.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Pool.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Pool.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolManager.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolManager.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolManager.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolManager.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolSchedulable.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolSchedulable.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolSchedulable.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolSchedulable.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Schedulable.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Schedulable.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Schedulable.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Schedulable.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/SchedulingAlgorithms.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/SchedulingAlgorithms.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/SchedulingAlgorithms.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/SchedulingAlgorithms.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/SchedulingMode.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/SchedulingMode.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/SchedulingMode.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/SchedulingMode.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/TaskSelector.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/TaskSelector.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/TaskSelector.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/TaskSelector.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/WeightAdjuster.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/WeightAdjuster.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/WeightAdjuster.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/WeightAdjuster.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/FakeSchedulable.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/FakeSchedulable.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/FakeSchedulable.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/FakeSchedulable.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestCapBasedLoadManager.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestCapBasedLoadManager.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestCapBasedLoadManager.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestCapBasedLoadManager.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestComputeFairShares.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestComputeFairShares.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestComputeFairShares.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestComputeFairShares.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java
diff --git a/mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerSystem.java b/hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerSystem.java
similarity index 100%
rename from mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerSystem.java
rename to hadoop-mapreduce/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerSystem.java
diff --git a/mapreduce/src/contrib/gridmix/README b/hadoop-mapreduce/src/contrib/gridmix/README
similarity index 100%
rename from mapreduce/src/contrib/gridmix/README
rename to hadoop-mapreduce/src/contrib/gridmix/README
diff --git a/mapreduce/src/contrib/gridmix/build.xml b/hadoop-mapreduce/src/contrib/gridmix/build.xml
similarity index 100%
rename from mapreduce/src/contrib/gridmix/build.xml
rename to hadoop-mapreduce/src/contrib/gridmix/build.xml
diff --git a/hadoop-mapreduce/src/contrib/gridmix/ivy.xml b/hadoop-mapreduce/src/contrib/gridmix/ivy.xml
new file mode 100644
index 0000000..d141410
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/gridmix/ivy.xml
@@ -0,0 +1,141 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+    <description>Gridmix</description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private" extends="runtime"
+ description="artifacts needed to compile/test the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-annotations"
+ rev="${hadoop-common.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-common"
+ rev="${hadoop-common.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-common-test"
+ rev="${hadoop-common.version}"
+ conf="test->default"/>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-hdfs"
+ rev="${hadoop-hdfs.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-hdfs-test"
+ rev="${hadoop-hdfs.version}"
+ conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="commons-logging"
+ name="commons-logging"
+ rev="${commons-logging.version}"
+ conf="common->default"/>
+ <dependency org="log4j"
+ name="log4j"
+ rev="${log4j.version}"
+ conf="common->master"/>
+ <dependency org="junit"
+ name="junit"
+ rev="${junit.version}"
+ conf="common->default"/>
+
+ <!-- necessary for Mini*Clusters -->
+ <dependency org="commons-httpclient"
+ name="commons-httpclient"
+ rev="${commons-httpclient.version}"
+ conf="common->master"/>
+ <dependency org="commons-codec"
+ name="commons-codec"
+ rev="${commons-codec.version}"
+ conf="common->default"/>
+ <dependency org="commons-net"
+ name="commons-net"
+ rev="${commons-net.version}"
+ conf="common->default"/>
+ <dependency org="org.mortbay.jetty"
+ name="jetty"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jetty-util"
+ rev="${jetty-util.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jsp-api-2.1"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jsp-2.1"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="servlet-api-2.5"
+ rev="${servlet-api-2.5.version}"
+ conf="common->master"/>
+ <dependency org="commons-cli"
+ name="commons-cli"
+ rev="${commons-cli.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.avro"
+ name="avro"
+ rev="${avro.version}"
+ conf="common->default">
+ <exclude module="ant"/>
+ <exclude module="jetty"/>
+ <exclude module="slf4j-simple"/>
+ </dependency>
+ <dependency org="org.codehaus.jackson"
+ name="jackson-mapper-asl"
+ rev="${jackson.version}"
+ conf="common->default"/>
+ <dependency org="org.codehaus.jackson"
+ name="jackson-core-asl"
+ rev="${jackson.version}"
+ conf="common->default"/>
+ <dependency org="com.thoughtworks.paranamer"
+ name="paranamer"
+ rev="${paranamer.version}"
+ conf="common->default"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
diff --git a/mapreduce/src/contrib/gridmix/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/gridmix/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/gridmix/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/gridmix/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FilePool.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FilePool.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FilePool.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FilePool.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FileQueue.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FileQueue.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FileQueue.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FileQueue.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJobSubmissionPolicy.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJobSubmissionPolicy.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJobSubmissionPolicy.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJobSubmissionPolicy.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/InputStriper.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/InputStriper.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/InputStriper.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/InputStriper.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobCreator.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobCreator.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobCreator.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobCreator.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadJob.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadJob.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadJob.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadJob.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadSplit.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadSplit.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadSplit.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadSplit.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Progressive.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Progressive.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Progressive.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Progressive.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomAlgorithms.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomAlgorithms.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomAlgorithms.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomAlgorithms.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SleepJob.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SleepJob.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SleepJob.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SleepJob.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StatListener.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StatListener.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StatListener.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StatListener.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Summarizer.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Summarizer.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Summarizer.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Summarizer.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/UserResolver.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/UserResolver.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/UserResolver.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/UserResolver.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageMatcher.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageMatcher.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageMatcher.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageMatcher.java
diff --git a/mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java b/hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/data/wordcount.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/data/wordcount.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/data/wordcount.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/data/wordcount.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobProducer.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobProducer.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobProducer.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobProducer.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationUtils.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationUtils.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationUtils.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationUtils.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestDistCacheEmulation.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestDistCacheEmulation.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestDistCacheEmulation.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestDistCacheEmulation.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFilePool.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFilePool.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFilePool.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFilePool.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFileQueue.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFileQueue.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFileQueue.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFileQueue.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomTextDataGenerator.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomTextDataGenerator.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomTextDataGenerator.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomTextDataGenerator.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java
diff --git a/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java
new file mode 100644
index 0000000..35db026
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java
@@ -0,0 +1,613 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred.gridmix;
+
+import java.io.IOException;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.StatusReporter;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskInputOutputContext;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
+import org.apache.hadoop.mapreduce.task.MapContextImpl;
+import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
+import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin.ProcResourceValues;
+import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
+import org.apache.hadoop.mapred.DummyResourceCalculatorPlugin;
+import org.apache.hadoop.mapred.gridmix.LoadJob.ResourceUsageMatcherRunner;
+import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin;
+import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageEmulatorPlugin;
+import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageMatcher;
+import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin.DefaultCpuUsageEmulator;
+
+/**
+ * Test Gridmix's resource emulator framework and supported plugins.
+ */
+public class TestResourceUsageEmulators {
+ /**
+ * A {@link ResourceUsageEmulatorPlugin} implementation for testing
+ * purposes. It essentially creates a file named 'test' in the test
+ * directory.
+ */
+ static class TestResourceUsageEmulatorPlugin
+ implements ResourceUsageEmulatorPlugin {
+ static final Path rootTempDir =
+ new Path(System.getProperty("test.build.data", "/tmp"));
+ static final Path tempDir =
+ new Path(rootTempDir, "TestResourceUsageEmulatorPlugin");
+ static final String DEFAULT_IDENTIFIER = "test";
+
+ private Path touchPath = null;
+ private FileSystem fs = null;
+
+ @Override
+ public void emulate() throws IOException, InterruptedException {
+ // add some time between 2 calls to emulate()
+ try {
+ Thread.sleep(1000); // sleep for 1s
+ } catch (Exception e) {} // an interrupted sleep is harmless here
+
+ try {
+ fs.delete(touchPath, false); // delete the touch file
+ //TODO Search for a better touch utility
+ fs.create(touchPath).close(); // recreate it
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ protected String getIdentifier() {
+ return DEFAULT_IDENTIFIER;
+ }
+
+ private static Path getFilePath(String id) {
+ return new Path(tempDir, id);
+ }
+
+ private static Path getInitFilePath(String id) {
+ return new Path(tempDir, id + ".init");
+ }
+
+ @Override
+ public void initialize(Configuration conf, ResourceUsageMetrics metrics,
+ ResourceCalculatorPlugin monitor, Progressive progress) {
+ // add some time between 2 calls to initialize()
+ try {
+ Thread.sleep(1000); // sleep for 1s
+ } catch (Exception e) {} // an interrupted sleep is harmless here
+
+ try {
+ fs = FileSystem.getLocal(conf);
+
+ Path initPath = getInitFilePath(getIdentifier());
+ fs.delete(initPath, false); // delete the old file
+ fs.create(initPath).close(); // create a new one
+
+ touchPath = getFilePath(getIdentifier());
+ fs.delete(touchPath, false);
+ } catch (Exception e) {
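+ // ignored: a failure here merely leaves the marker files missing,
+ // which the assertions in the callers will then surface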
+
+ } finally {
+ if (fs != null) {
+ try {
+ fs.deleteOnExit(tempDir);
+ } catch (IOException ioe){}
+ }
+ }
+ }
+
+ // test if the emulation framework successfully initialized this plugin
+ static long testInitialization(String id, Configuration conf)
+ throws IOException {
+ Path testPath = getInitFilePath(id);
+ FileSystem fs = FileSystem.getLocal(conf);
+ return fs.exists(testPath)
+ ? fs.getFileStatus(testPath).getModificationTime()
+ : 0;
+ }
+
+ // test if the emulation framework successfully invoked this plugin's emulate()
+ static long testEmulation(String id, Configuration conf)
+ throws IOException {
+ Path testPath = getFilePath(id);
+ FileSystem fs = FileSystem.getLocal(conf);
+ return fs.exists(testPath)
+ ? fs.getFileStatus(testPath).getModificationTime()
+ : 0;
+ }
+ }
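+
+ // The plugin above implements a simple marker-file protocol: initialize()
+ // touches '<id>.init' and emulate() re-touches '<id>', so tests can
+ // recover the time of each phase from the files' modification times.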
+
+ /**
+ * Test implementation of {@link ResourceUsageEmulatorPlugin} which creates
+ * a file named 'others' in the test directory.
+ */
+ static class TestOthers extends TestResourceUsageEmulatorPlugin {
+ static final String ID = "others";
+
+ @Override
+ protected String getIdentifier() {
+ return ID;
+ }
+ }
+
+ /**
+ * Test implementation of {@link ResourceUsageEmulatorPlugin} which creates
+ * a file named 'cpu' in the test directory.
+ */
+ static class TestCpu extends TestResourceUsageEmulatorPlugin {
+ static final String ID = "cpu";
+
+ @Override
+ protected String getIdentifier() {
+ return ID;
+ }
+ }
+
+ /**
+ * Test {@link ResourceUsageMatcher}.
+ */
+ @Test
+ public void testResourceUsageMatcher() throws Exception {
+ ResourceUsageMatcher matcher = new ResourceUsageMatcher();
+ Configuration conf = new Configuration();
+ conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
+ TestResourceUsageEmulatorPlugin.class,
+ ResourceUsageEmulatorPlugin.class);
+ long currentTime = System.currentTimeMillis();
+
+ matcher.configure(conf, null, null, null);
+
+ matcher.matchResourceUsage();
+
+ String id = TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
+ long result =
+ TestResourceUsageEmulatorPlugin.testInitialization(id, conf);
+ assertTrue("Resource usage matcher failed to initialize the configured"
+ + " plugin", result > currentTime);
+ result = TestResourceUsageEmulatorPlugin.testEmulation(id, conf);
+ assertTrue("Resource usage matcher failed to load and emulate the"
+ + " configured plugin", result > currentTime);
+
+ // test plugin order to first emulate cpu and then others
+ conf.setStrings(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
+ TestCpu.class.getName() + "," + TestOthers.class.getName());
+
+ matcher.configure(conf, null, null, null);
+
+ // test the initialization order
+ long time1 =
+ TestResourceUsageEmulatorPlugin.testInitialization(TestCpu.ID, conf);
+ long time2 =
+ TestResourceUsageEmulatorPlugin.testInitialization(TestOthers.ID,
+ conf);
+ assertTrue("Resource usage matcher failed to initialize the configured"
+ + " plugins in order", time1 < time2);
+
+ matcher.matchResourceUsage();
+
+ // Note that the cpu usage emulator plugin is configured first and then
+ // the others plugin, so emulation should also have happened in that order.
+ time1 =
+ TestResourceUsageEmulatorPlugin.testEmulation(TestCpu.ID, conf);
+ time2 =
+ TestResourceUsageEmulatorPlugin.testEmulation(TestOthers.ID,
+ conf);
+ assertTrue("Resource usage matcher failed to emulate the configured"
+ + " plugins in order", time1 < time2);
+ }
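+
+ // A note on the two configuration styles exercised above: setClass()
+ // registers a single plugin class, while setStrings() supplies a
+ // comma-separated list of class names; the matcher preserves the list
+ // order, which is what the ordering assertions rely on.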
+
+ /**
+ * Fakes the cumulative usage using {@link FakeCpuUsageEmulatorCore}.
+ */
+ static class FakeResourceUsageMonitor extends DummyResourceCalculatorPlugin {
+ private FakeCpuUsageEmulatorCore core;
+
+ public FakeResourceUsageMonitor(FakeCpuUsageEmulatorCore core) {
+ this.core = core;
+ }
+
+ /**
+ * A dummy CPU usage monitor. Every call to
+ * {@link ResourceCalculatorPlugin#getCumulativeCpuTime()} will return the
+ * value of {@link FakeCpuUsageEmulatorCore#getCpuUsage()}.
+ */
+ @Override
+ public long getCumulativeCpuTime() {
+ return core.getCpuUsage();
+ }
+
+ /**
+ * Returns a {@link ProcResourceValues} with cumulative cpu usage
+ * computed using {@link #getCumulativeCpuTime()}.
+ */
+ @Override
+ public ProcResourceValues getProcResourceValues() {
+ long usageValue = getCumulativeCpuTime();
+ return new ProcResourceValues(usageValue, -1, -1);
+ }
+ }
+
+ /**
+ * A dummy {@link Progressive} implementation that allows users to set the
+ * progress for testing. The {@link Progressive#getProgress()} call will
+ * return the last progress value set using
+ * {@link FakeProgressive#setProgress(float)}.
+ */
+ static class FakeProgressive implements Progressive {
+ private float progress = 0F;
+ @Override
+ public float getProgress() {
+ return progress;
+ }
+
+ void setProgress(float progress) {
+ this.progress = progress;
+ }
+ }
+
+ /**
+ * A dummy reporter for {@link LoadJob.ResourceUsageMatcherRunner}.
+ */
+ private static class DummyReporter extends StatusReporter {
+ private Progressive progress;
+
+ DummyReporter(Progressive progress) {
+ this.progress = progress;
+ }
+
+ @Override
+ public org.apache.hadoop.mapreduce.Counter getCounter(Enum<?> name) {
+ return null;
+ }
+
+ @Override
+ public org.apache.hadoop.mapreduce.Counter getCounter(String group,
+ String name) {
+ return null;
+ }
+
+ @Override
+ public void progress() {
+ }
+
+ @Override
+ public float getProgress() {
+ return progress.getProgress();
+ }
+
+ @Override
+ public void setStatus(String status) {
+ }
+ }
+
+ // Extends ResourceUsageMatcherRunner for testing.
+ @SuppressWarnings("unchecked")
+ private static class FakeResourceUsageMatcherRunner
+ extends ResourceUsageMatcherRunner {
+ FakeResourceUsageMatcherRunner(TaskInputOutputContext context,
+ ResourceUsageMetrics metrics) {
+ super(context, metrics);
+ }
+
+ // test ResourceUsageMatcherRunner
+ void test() throws Exception {
+ super.match();
+ }
+ }
+
+ /**
+ * Test {@link LoadJob.ResourceUsageMatcherRunner}.
+ */
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testResourceUsageMatcherRunner() throws Exception {
+ Configuration conf = new Configuration();
+ FakeProgressive progress = new FakeProgressive();
+
+ // set the resource calculator plugin
+ conf.setClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
+ DummyResourceCalculatorPlugin.class,
+ ResourceCalculatorPlugin.class);
+ // set the resource usage emulation plugin implementation class
+ conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
+ TestResourceUsageEmulatorPlugin.class,
+ ResourceUsageEmulatorPlugin.class);
+
+ long currentTime = System.currentTimeMillis();
+
+ // initialize the matcher class
+ TaskAttemptID id = new TaskAttemptID("test", 1, TaskType.MAP, 1, 1);
+ StatusReporter reporter = new DummyReporter(progress);
+ TaskInputOutputContext context =
+ new MapContextImpl(conf, id, null, null, null, reporter, null);
+ FakeResourceUsageMatcherRunner matcher =
+ new FakeResourceUsageMatcherRunner(context, null);
+
+ // check if the matcher initialized the plugin
+ String identifier = TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
+ long initTime =
+ TestResourceUsageEmulatorPlugin.testInitialization(identifier, conf);
+ assertTrue("ResourceUsageMatcherRunner failed to initialize the"
+ + " configured plugin", initTime > currentTime);
+
+ // check the progress
+ assertEquals("Progress mismatch in ResourceUsageMatcherRunner",
+ 0, progress.getProgress(), 0D);
+
+ // call match() and check progress
+ progress.setProgress(0.01f);
+ currentTime = System.currentTimeMillis();
+ matcher.test();
+ long emulateTime =
+ TestResourceUsageEmulatorPlugin.testEmulation(identifier, conf);
+ assertTrue("ProgressBasedResourceUsageMatcher failed to load and emulate"
+ + " the configured plugin", emulateTime > currentTime);
+ }
+
+ /**
+ * Test {@link CumulativeCpuUsageEmulatorPlugin}'s core CPU usage emulation
+ * engine.
+ */
+ @Test
+ public void testCpuUsageEmulator() throws IOException {
+ // test CpuUsageEmulator calibration with fake resource calculator plugin
+ long target = 100000L; // 100 secs
+ int unitUsage = 50;
+ FakeCpuUsageEmulatorCore fakeCpuEmulator = new FakeCpuUsageEmulatorCore();
+ fakeCpuEmulator.setUnitUsage(unitUsage);
+ FakeResourceUsageMonitor fakeMonitor =
+ new FakeResourceUsageMonitor(fakeCpuEmulator);
+
+ // calibrate for 100ms
+ fakeCpuEmulator.calibrate(fakeMonitor, target);
+
+ // by default, CpuUsageEmulator.calibrate() will consume 100ms of CPU usage
+ assertEquals("Fake calibration failed",
+ 100, fakeMonitor.getCumulativeCpuTime());
+ assertEquals("Fake calibration failed",
+ 100, fakeCpuEmulator.getCpuUsage());
+ // by default, CpuUsageEmulator.performUnitComputation() will be called
+ // twice
+ assertEquals("Fake calibration failed",
+ 2, fakeCpuEmulator.getNumCalls());
+ }
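+
+ // Calibration arithmetic for the test above: each call to the fake core's
+ // performUnitComputation() adds unitUsage (50) to the fake CPU clock, so
+ // the default 100ms calibration budget noted above is reached after
+ // exactly two calls (2 * 50 = 100), which is what the asserts check.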
+
+ /**
+ * This is a dummy class that fakes CPU usage.
+ */
+ private static class FakeCpuUsageEmulatorCore
+ extends DefaultCpuUsageEmulator {
+ private int numCalls = 0;
+ private int unitUsage = 1;
+ private int cpuUsage = 0;
+
+ @Override
+ protected void performUnitComputation() {
+ ++numCalls;
+ cpuUsage += unitUsage;
+ }
+
+ int getNumCalls() {
+ return numCalls;
+ }
+
+ int getCpuUsage() {
+ return cpuUsage;
+ }
+
+ void reset() {
+ numCalls = 0;
+ cpuUsage = 0;
+ }
+
+ void setUnitUsage(int unitUsage) {
+ this.unitUsage = unitUsage;
+ }
+ }
+
+ // Creates a ResourceUsageMetrics object from the target usage
+ static ResourceUsageMetrics createMetrics(long target) {
+ ResourceUsageMetrics metrics = new ResourceUsageMetrics();
+ metrics.setCumulativeCpuUsage(target);
+ metrics.setVirtualMemoryUsage(target);
+ metrics.setPhysicalMemoryUsage(target);
+ metrics.setHeapUsage(target);
+ return metrics;
+ }
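+
+ // For example, createMetrics(1000L) yields metrics whose cumulative CPU,
+ // virtual memory, physical memory and heap targets are all 1000; the
+ // plugin test below uses exactly that as its 1000ms CPU target.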
+
+ /**
+ * Test {@link CumulativeCpuUsageEmulatorPlugin}.
+ */
+ @Test
+ public void testCumulativeCpuUsageEmulatorPlugin() throws Exception {
+ Configuration conf = new Configuration();
+ long targetCpuUsage = 1000L;
+ int unitCpuUsage = 50;
+
+ // fake progress indicator
+ FakeProgressive fakeProgress = new FakeProgressive();
+
+ // fake cpu usage generator
+ FakeCpuUsageEmulatorCore fakeCore = new FakeCpuUsageEmulatorCore();
+ fakeCore.setUnitUsage(unitCpuUsage);
+
+ // a cumulative cpu usage emulator with fake core
+ CumulativeCpuUsageEmulatorPlugin cpuPlugin =
+ new CumulativeCpuUsageEmulatorPlugin(fakeCore);
+
+ // test with invalid or missing resource usage value
+ ResourceUsageMetrics invalidUsage = createMetrics(0);
+ cpuPlugin.initialize(conf, invalidUsage, null, null);
+
+ // test that a disabled cpu emulation plugin's emulate() call is a
+ // no-operation, i.e. that the plugin disabled itself on the invalid usage
+ int numCallsPre = fakeCore.getNumCalls();
+ long cpuUsagePre = fakeCore.getCpuUsage();
+ cpuPlugin.emulate();
+ int numCallsPost = fakeCore.getNumCalls();
+ long cpuUsagePost = fakeCore.getCpuUsage();
+
+ // test that no calls were made to the cpu usage emulator core
+ assertEquals("Disabled cumulative CPU usage emulation plugin works!",
+ numCallsPre, numCallsPost);
+
+ // test that no cpu usage was accumulated by the cpu usage emulator core
+ assertEquals("Disabled cumulative CPU usage emulation plugin works!",
+ cpuUsagePre, cpuUsagePost);
+
+ // test with valid resource usage value
+ ResourceUsageMetrics metrics = createMetrics(targetCpuUsage);
+
+ // fake monitor
+ ResourceCalculatorPlugin monitor = new FakeResourceUsageMonitor(fakeCore);
+
+ // test with default emulation interval
+ testEmulationAccuracy(conf, fakeCore, monitor, metrics, cpuPlugin,
+ targetCpuUsage, targetCpuUsage / unitCpuUsage);
+
+ // test with custom value for emulation interval of 20%
+ conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,
+ 0.2F);
+ testEmulationAccuracy(conf, fakeCore, monitor, metrics, cpuPlugin,
+ targetCpuUsage, targetCpuUsage / unitCpuUsage);
+
+ // test if emulation interval boundary is respected (unit usage = 1)
+ // test the case where the current progress is less than threshold
+ fakeProgress = new FakeProgressive(); // initialize
+ fakeCore.reset();
+ fakeCore.setUnitUsage(1);
+ conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,
+ 0.25F);
+ cpuPlugin.initialize(conf, metrics, monitor, fakeProgress);
+ // take a snapshot after the initialization
+ long initCpuUsage = monitor.getCumulativeCpuTime();
+ long initNumCalls = fakeCore.getNumCalls();
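+ // judging from the boundary cases below, the expected usage at progress p
+ // is roughly max(current-usage, target * p^4), re-evaluated only when p
+ // crosses the configured emulation interval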
+ // test with 0 progress
+ testEmulationBoundary(0F, fakeCore, fakeProgress, cpuPlugin, initCpuUsage,
+ initNumCalls, "[no-op, 0 progress]");
+ // test with 24% progress
+ testEmulationBoundary(0.24F, fakeCore, fakeProgress, cpuPlugin,
+ initCpuUsage, initNumCalls, "[no-op, 24% progress]");
+ // test with 25% progress
+ // target = 1000ms, target emulation at 25% = 250ms,
+ // weighted target = 1000 * 0.25^4 (we are using progress^4 as the weight)
+ // ~ 4
+ // but current usage = init-usage = 100, hence expected = 100
+ testEmulationBoundary(0.25F, fakeCore, fakeProgress, cpuPlugin,
+ initCpuUsage, initNumCalls, "[op, 25% progress]");
+
+ // test with 80% progress
+ // target = 1000ms, target emulation at 80% = 800ms,
+ // weighted target = 1000 * 0.80^4 (we are using progress^4 as the weight)
+ // ~ 410
+ // current-usage = init-usage = 100, hence expected-usage = 410
+ testEmulationBoundary(0.80F, fakeCore, fakeProgress, cpuPlugin, 410, 410,
+ "[op, 80% progress]");
+
+ // now test if the final call with 100% progress ramps up the CPU usage
+ testEmulationBoundary(1F, fakeCore, fakeProgress, cpuPlugin, targetCpuUsage,
+ targetCpuUsage, "[op, 100% progress]");
+
+ // test if emulation interval boundary is respected (unit usage = 50)
+ // test the case where the current progress is less than threshold
+ fakeProgress = new FakeProgressive(); // initialize
+ fakeCore.reset();
+ fakeCore.setUnitUsage(unitCpuUsage);
+ conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,
+ 0.40F);
+ cpuPlugin.initialize(conf, metrics, monitor, fakeProgress);
+ // take a snapshot after the initialization
+ initCpuUsage = monitor.getCumulativeCpuTime();
+ initNumCalls = fakeCore.getNumCalls();
+ // test with 0 progress
+ testEmulationBoundary(0F, fakeCore, fakeProgress, cpuPlugin, initCpuUsage,
+ initNumCalls, "[no-op, 0 progress]");
+ // test with 39% progress
+ testEmulationBoundary(0.39F, fakeCore, fakeProgress, cpuPlugin,
+ initCpuUsage, initNumCalls, "[no-op, 39% progress]");
+ // test with 40% progress
+ // target = 1000ms, target emulation at 40% = 400ms,
+ // weighted target = 1000 * 0.40^4 (we are using progress^4 as the weight)
+ // ~ 26
+ // current-usage = init-usage = 100, hence expected-usage = 100
+ testEmulationBoundary(0.40F, fakeCore, fakeProgress, cpuPlugin,
+ initCpuUsage, initNumCalls, "[op, 40% progress]");
+
+ // test with 90% progress
+ // target = 1000ms, target emulation at 90% = 900ms,
+ // weighted target = 1000 * 0.90^4 (we are using progress^4 as the weight)
+ // ~ 656
+ // current-usage = init-usage = 100, hence expected-usage = 656 but
+ // the fake-core increases in steps of 50, hence final target = 700
+ testEmulationBoundary(0.90F, fakeCore, fakeProgress, cpuPlugin, 700,
+ 700 / unitCpuUsage, "[op, 90% progress]");
+
+ // now test if the final call with 100% progress ramps up the CPU usage
+ testEmulationBoundary(1F, fakeCore, fakeProgress, cpuPlugin, targetCpuUsage,
+ targetCpuUsage / unitCpuUsage, "[op, 100% progress]");
+ }
+
+ // test whether the CPU usage emulator achieves the desired target using
+ // the desired number of calls to the underlying core engine.
+ private static void testEmulationAccuracy(Configuration conf,
+ FakeCpuUsageEmulatorCore fakeCore,
+ ResourceCalculatorPlugin monitor,
+ ResourceUsageMetrics metrics,
+ CumulativeCpuUsageEmulatorPlugin cpuPlugin,
+ long expectedTotalCpuUsage, long expectedTotalNumCalls)
+ throws Exception {
+ FakeProgressive fakeProgress = new FakeProgressive();
+ fakeCore.reset();
+ cpuPlugin.initialize(conf, metrics, monitor, fakeProgress);
+ int numLoops = 0;
+ while (fakeProgress.getProgress() < 1) {
+ ++numLoops;
+ float progress = (float)numLoops / 100;
+ fakeProgress.setProgress(progress);
+ cpuPlugin.emulate();
+ }
+
+ // test if the resource plugin shows the expected invocations
+ assertEquals("Cumulative cpu usage emulator plugin failed (num calls)!",
+ expectedTotalNumCalls, fakeCore.getNumCalls(), 0L);
+ // test if the resource plugin shows the expected usage
+ assertEquals("Cumulative cpu usage emulator plugin failed (total usage)!",
+ expectedTotalCpuUsage, fakeCore.getCpuUsage(), 0L);
+ }
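+
+ // In short, testEmulationAccuracy() drives progress from 1% to 100% in 1%
+ // steps, calling emulate() at every step, and then verifies that the fake
+ // core performed the expected number of unit computations adding up to the
+ // expected total usage.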
+
+ // tests if the CPU usage emulation plugin emulates only at the expected
+ // progress gaps
+ private static void testEmulationBoundary(float progress,
+ FakeCpuUsageEmulatorCore fakeCore, FakeProgressive fakeProgress,
+ CumulativeCpuUsageEmulatorPlugin cpuPlugin, long expectedTotalCpuUsage,
+ long expectedTotalNumCalls, String info) throws Exception {
+ fakeProgress.setProgress(progress);
+ cpuPlugin.emulate();
+
+ assertEquals("Emulation interval test for cpu usage failed " + info + "!",
+ expectedTotalCpuUsage, fakeCore.getCpuUsage(), 0L);
+ assertEquals("Emulation interval test for num calls failed " + info + "!",
+ expectedTotalNumCalls, fakeCore.getNumCalls(), 0L);
+ }
+}
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestSleepJob.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz b/hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz
similarity index 100%
rename from mapreduce/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz
rename to hadoop-mapreduce/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz
Binary files differ
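Every entry above and below with "similarity index 100%" is a pure rename: the file content is byte-identical and only the path moved from mapreduce/ to hadoop-mapreduce/. As a minimal sketch of how such a tree rename is produced and verified (assuming a git checkout of the branch; the commands and commit message here are illustrative, not part of this patch):

git mv mapreduce hadoop-mapreduce
git commit -m "Move mapreduce tree under hadoop-mapreduce"
# -M asks git to detect renames; --summary prints one
# "rename {mapreduce => hadoop-mapreduce}/... (100%)" line per file,
# corresponding to the rename entries in this diff
git diff -M --summary HEAD~1 HEAD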
diff --git a/mapreduce/src/contrib/index/README b/hadoop-mapreduce/src/contrib/index/README
similarity index 100%
rename from mapreduce/src/contrib/index/README
rename to hadoop-mapreduce/src/contrib/index/README
diff --git a/mapreduce/src/contrib/index/build.xml b/hadoop-mapreduce/src/contrib/index/build.xml
similarity index 100%
rename from mapreduce/src/contrib/index/build.xml
rename to hadoop-mapreduce/src/contrib/index/build.xml
diff --git a/mapreduce/src/contrib/index/conf/index-config.xml.template b/hadoop-mapreduce/src/contrib/index/conf/index-config.xml.template
similarity index 100%
rename from mapreduce/src/contrib/index/conf/index-config.xml.template
rename to hadoop-mapreduce/src/contrib/index/conf/index-config.xml.template
diff --git a/hadoop-mapreduce/src/contrib/index/ivy.xml b/hadoop-mapreduce/src/contrib/index/ivy.xml
new file mode 100644
index 0000000..be0b458
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/index/ivy.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Apache Hadoop
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private"
+ extends="runtime"
+ description="artifacts needed to compile/test the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common-test" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs" rev="${hadoop-hdfs.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs-test" rev="${hadoop-hdfs.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->default"/>
+ <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
+ <dependency org="org.apache.lucene" name="lucene-core" rev="${lucene-core.version}" conf="common->default"/>
+ <dependency org="junit" name="junit" rev="${junit.version}" conf="common->default"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
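The ivy.xml added above wires the index contrib into the build with standard Ivy configuration mappings: conf="common->default" maps the module's local common configuration to the dependency's default configuration (the artifact plus its transitive runtime dependencies), while conf="common->master" (used for log4j) fetches the artifact alone, and the trailing <exclude> elements drop the com.sun.jdmk/com.sun.jmx/javax.jms/javax.mail stragglers log4j can pull in. A minimal stand-alone Ant sketch of resolving this file, assuming the Ivy Ant tasks are on Ant's classpath and that version properties such as ${hadoop-common.version} are defined (as the contrib build does via ivy/libraries.properties); the project, target, and path names are illustrative, not the build's actual names:

<project name="resolve-index-deps" xmlns:ivy="antlib:org.apache.ivy.ant">
  <target name="resolve">
    <!-- Resolve the 'common' configuration declared in the ivy.xml above -->
    <ivy:resolve file="ivy.xml" conf="common"/>
    <!-- Expose the resolved jars as an Ant path, e.g. for <javac classpathref=...> -->
    <ivy:cachepath pathid="index.common.classpath" conf="common"/>
  </target>
</project>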
diff --git a/mapreduce/src/contrib/index/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/index/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/index/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/index/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/index/sample/data.txt b/hadoop-mapreduce/src/contrib/index/sample/data.txt
similarity index 100%
rename from mapreduce/src/contrib/index/sample/data.txt
rename to hadoop-mapreduce/src/contrib/index/sample/data.txt
diff --git a/mapreduce/src/contrib/index/sample/data2.txt b/hadoop-mapreduce/src/contrib/index/sample/data2.txt
similarity index 100%
rename from mapreduce/src/contrib/index/sample/data2.txt
rename to hadoop-mapreduce/src/contrib/index/sample/data2.txt
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/HashingDistributionPolicy.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/HashingDistributionPolicy.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/HashingDistributionPolicy.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/HashingDistributionPolicy.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/IdentityLocalAnalysis.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/IdentityLocalAnalysis.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/IdentityLocalAnalysis.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/IdentityLocalAnalysis.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocInputFormat.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocInputFormat.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocInputFormat.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocInputFormat.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocLocalAnalysis.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocLocalAnalysis.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocLocalAnalysis.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocLocalAnalysis.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocRecordReader.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocRecordReader.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocRecordReader.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocRecordReader.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocTextAndOp.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocTextAndOp.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocTextAndOp.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocTextAndOp.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/RoundRobinDistributionPolicy.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/RoundRobinDistributionPolicy.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/RoundRobinDistributionPolicy.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/RoundRobinDistributionPolicy.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/FileSystemDirectory.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/FileSystemDirectory.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/FileSystemDirectory.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/FileSystemDirectory.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneIndexFileNameFilter.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneIndexFileNameFilter.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneIndexFileNameFilter.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneIndexFileNameFilter.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneUtil.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneUtil.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneUtil.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneUtil.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDeletionPolicy.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDeletionPolicy.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDeletionPolicy.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDeletionPolicy.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDirectory.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDirectory.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDirectory.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDirectory.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/RAMDirectoryUtil.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/RAMDirectoryUtil.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/RAMDirectoryUtil.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/RAMDirectoryUtil.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/ShardWriter.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/ShardWriter.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/ShardWriter.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/ShardWriter.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/main/UpdateIndex.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/main/UpdateIndex.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/main/UpdateIndex.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/main/UpdateIndex.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentAndOp.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentAndOp.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentAndOp.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentAndOp.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentID.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentID.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentID.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentID.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IDistributionPolicy.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IDistributionPolicy.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IDistributionPolicy.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IDistributionPolicy.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IIndexUpdater.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IIndexUpdater.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IIndexUpdater.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IIndexUpdater.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/ILocalAnalysis.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/ILocalAnalysis.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/ILocalAnalysis.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/ILocalAnalysis.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateCombiner.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateCombiner.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateCombiner.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateCombiner.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateConfiguration.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateConfiguration.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateConfiguration.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateConfiguration.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateMapper.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateMapper.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateMapper.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateMapper.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateOutputFormat.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateOutputFormat.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateOutputFormat.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateOutputFormat.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdatePartitioner.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdatePartitioner.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdatePartitioner.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdatePartitioner.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateReducer.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateReducer.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateReducer.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateReducer.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdater.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdater.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdater.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdater.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IntermediateForm.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IntermediateForm.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IntermediateForm.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IntermediateForm.java
diff --git a/mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/Shard.java b/hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/Shard.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/Shard.java
rename to hadoop-mapreduce/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/Shard.java
diff --git a/mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/lucene/TestMixedDirectory.java b/hadoop-mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/lucene/TestMixedDirectory.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/lucene/TestMixedDirectory.java
rename to hadoop-mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/lucene/TestMixedDirectory.java
diff --git a/mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestDistributionPolicy.java b/hadoop-mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestDistributionPolicy.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestDistributionPolicy.java
rename to hadoop-mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestDistributionPolicy.java
diff --git a/mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestIndexUpdater.java b/hadoop-mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestIndexUpdater.java
similarity index 100%
rename from mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestIndexUpdater.java
rename to hadoop-mapreduce/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestIndexUpdater.java
diff --git a/mapreduce/src/contrib/mumak/bin/mumak.sh b/hadoop-mapreduce/src/contrib/mumak/bin/mumak.sh
similarity index 100%
rename from mapreduce/src/contrib/mumak/bin/mumak.sh
rename to hadoop-mapreduce/src/contrib/mumak/bin/mumak.sh
diff --git a/mapreduce/src/contrib/mumak/build.xml b/hadoop-mapreduce/src/contrib/mumak/build.xml
similarity index 100%
rename from mapreduce/src/contrib/mumak/build.xml
rename to hadoop-mapreduce/src/contrib/mumak/build.xml
diff --git a/mapreduce/src/contrib/mumak/conf/log4j.properties b/hadoop-mapreduce/src/contrib/mumak/conf/log4j.properties
similarity index 100%
rename from mapreduce/src/contrib/mumak/conf/log4j.properties
rename to hadoop-mapreduce/src/contrib/mumak/conf/log4j.properties
diff --git a/mapreduce/src/contrib/mumak/conf/mumak.xml b/hadoop-mapreduce/src/contrib/mumak/conf/mumak.xml
similarity index 100%
rename from mapreduce/src/contrib/mumak/conf/mumak.xml
rename to hadoop-mapreduce/src/contrib/mumak/conf/mumak.xml
diff --git a/hadoop-mapreduce/src/contrib/mumak/ivy.xml b/hadoop-mapreduce/src/contrib/mumak/ivy.xml
new file mode 100644
index 0000000..9d5369d
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/mumak/ivy.xml
@@ -0,0 +1,141 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <description>
+ Mumak
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private"
+ extends="runtime"
+ description="artifacts needed to compile/test the application"/>
+ <conf name="test" visibility="private" extends="master,common,runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-annotations"
+ rev="${hadoop-common.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common"
+ rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common-test"
+ rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs"
+ rev="${hadoop-hdfs.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
+ rev="${hadoop-hdfs.version}" conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+<dependency org="commons-logging"
+ name="commons-logging"
+ rev="${commons-logging.version}"
+ conf="common->default"/>
+ <dependency org="log4j"
+ name="log4j"
+ rev="${log4j.version}"
+ conf="common->master"/>
+ <dependency org="org.codehaus.jackson"
+ name="jackson-mapper-asl"
+ rev="${jackson.version}"
+ conf="common->default"/>
+ <dependency org="org.codehaus.jackson"
+ name="jackson-core-asl"
+ rev="${jackson.version}"
+ conf="common->default"/>
+ <dependency org="junit"
+ name="junit"
+ rev="${junit.version}"
+ conf="common->default"/>
+ <dependency org="org.aspectj"
+ name="aspectjrt"
+ rev="${aspectj.version}"
+ conf="common->default">
+ </dependency>
+ <dependency org="org.aspectj"
+ name="aspectjtools"
+ rev="${aspectj.version}"
+ conf="common->default">
+ </dependency>
+ <!-- necessary for Mini*Clusters -->
+ <dependency org="commons-httpclient"
+ name="commons-httpclient"
+ rev="${commons-httpclient.version}"
+ conf="common->master"/>
+ <dependency org="commons-codec"
+ name="commons-codec"
+ rev="${commons-codec.version}"
+ conf="common->default"/>
+ <dependency org="commons-net"
+ name="commons-net"
+ rev="${commons-net.version}"
+ conf="common->default"/>
+ <dependency org="org.mortbay.jetty"
+ name="jetty"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jetty-util"
+ rev="${jetty-util.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jsp-api-2.1"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jsp-2.1"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="servlet-api-2.5"
+ rev="${servlet-api-2.5.version}"
+ conf="common->master"/>
+ <dependency org="commons-cli"
+ name="commons-cli"
+ rev="${commons-cli.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.avro"
+ name="avro"
+ rev="${avro.version}"
+ conf="common->default">
+ <exclude module="ant"/>
+ <exclude module="jetty"/>
+ <exclude module="slf4j-simple"/>
+ </dependency>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
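Mumak's ivy.xml follows the same pattern as the index contrib's, but its test configuration extends master,common,runtime so that hadoop-hdfs-test can resolve via conf="test->default", and it trims avro's transitive ant/jetty/slf4j-simple modules with nested <exclude> elements. To materialize the resolved jars on disk rather than only on a cache path, a sketch using Ivy's retrieve task (the lib/ layout here is an assumption for illustration, not the build's actual layout):

<ivy:retrieve pattern="lib/[conf]/[artifact]-[revision].[ext]" conf="common,test" sync="true"/>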
diff --git a/mapreduce/src/contrib/mumak/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/mumak/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/mumak/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/mumak/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/AllMapsCompletedTaskAction.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/AllMapsCompletedTaskAction.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/AllMapsCompletedTaskAction.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/AllMapsCompletedTaskAction.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/DeterministicCollectionAspects.aj b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/DeterministicCollectionAspects.aj
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/DeterministicCollectionAspects.aj
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/DeterministicCollectionAspects.aj
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListenerAspects.aj b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListenerAspects.aj
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListenerAspects.aj
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListenerAspects.aj
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/FakeConcurrentHashMap.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/FakeConcurrentHashMap.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/FakeConcurrentHashMap.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/FakeConcurrentHashMap.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/HeartbeatEvent.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/HeartbeatEvent.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/HeartbeatEvent.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/HeartbeatEvent.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobCompleteEvent.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobCompleteEvent.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobCompleteEvent.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobCompleteEvent.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobInitializationPollerAspects.aj b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobInitializationPollerAspects.aj
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobInitializationPollerAspects.aj
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobInitializationPollerAspects.aj
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobSubmissionEvent.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobSubmissionEvent.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobSubmissionEvent.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobSubmissionEvent.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/LoadProbingEvent.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/LoadProbingEvent.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/LoadProbingEvent.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/LoadProbingEvent.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorCSJobInitializationThread.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorCSJobInitializationThread.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorCSJobInitializationThread.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorCSJobInitializationThread.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorClock.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorClock.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorClock.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorClock.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEngine.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEngine.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEngine.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEngine.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEvent.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEvent.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEvent.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEvent.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventListener.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventListener.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventListener.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventListener.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventQueue.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventQueue.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventQueue.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventQueue.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobCache.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobCache.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobCache.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobCache.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobClient.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobClient.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobClient.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobClient.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobInProgress.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobInProgress.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobInProgress.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobInProgress.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStory.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStory.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStory.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStory.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStoryProducer.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStoryProducer.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStoryProducer.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStoryProducer.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobSubmissionPolicy.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobSubmissionPolicy.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobSubmissionPolicy.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobSubmissionPolicy.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobTracker.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobTracker.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobTracker.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobTracker.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorLaunchTaskAction.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorLaunchTaskAction.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorLaunchTaskAction.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorLaunchTaskAction.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTracker.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTracker.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTracker.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTracker.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTrackerStatus.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTrackerStatus.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTrackerStatus.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTrackerStatus.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorThreadWakeUpEvent.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorThreadWakeUpEvent.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorThreadWakeUpEvent.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorThreadWakeUpEvent.java
diff --git a/mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/TaskAttemptCompletionEvent.java b/hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/TaskAttemptCompletionEvent.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/TaskAttemptCompletionEvent.java
rename to hadoop-mapreduce/src/contrib/mumak/src/java/org/apache/hadoop/mapred/TaskAttemptCompletionEvent.java
diff --git a/mapreduce/src/contrib/mumak/src/test/data/19-jobs.topology.json.gz b/hadoop-mapreduce/src/contrib/mumak/src/test/data/19-jobs.topology.json.gz
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/data/19-jobs.topology.json.gz
rename to hadoop-mapreduce/src/contrib/mumak/src/test/data/19-jobs.topology.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/mumak/src/test/data/19-jobs.trace.json.gz b/hadoop-mapreduce/src/contrib/mumak/src/test/data/19-jobs.trace.json.gz
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/data/19-jobs.trace.json.gz
rename to hadoop-mapreduce/src/contrib/mumak/src/test/data/19-jobs.trace.json.gz
Binary files differ
diff --git a/mapreduce/src/contrib/mumak/src/test/data/topo-with-numeric-ips.json b/hadoop-mapreduce/src/contrib/mumak/src/test/data/topo-with-numeric-ips.json
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/data/topo-with-numeric-ips.json
rename to hadoop-mapreduce/src/contrib/mumak/src/test/data/topo-with-numeric-ips.json
diff --git a/mapreduce/src/contrib/mumak/src/test/data/topo-without-numeric-ips.json b/hadoop-mapreduce/src/contrib/mumak/src/test/data/topo-without-numeric-ips.json
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/data/topo-without-numeric-ips.json
rename to hadoop-mapreduce/src/contrib/mumak/src/test/data/topo-without-numeric-ips.json
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/CheckedEventQueue.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/CheckedEventQueue.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/CheckedEventQueue.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/CheckedEventQueue.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/FakeJobs.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/FakeJobs.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/FakeJobs.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/FakeJobs.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/HeartbeatHelper.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/HeartbeatHelper.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/HeartbeatHelper.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/HeartbeatHelper.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorEngine.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorEngine.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorEngine.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorEngine.java
diff --git a/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorJobTracker.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorJobTracker.java
new file mode 100644
index 0000000..b80ed89
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorJobTracker.java
@@ -0,0 +1,483 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.mapred.TaskStatus.State;
+import org.apache.hadoop.mapred.TaskStatus.Phase;
+import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobPriority;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.QueueAclsInfo;
+import org.apache.hadoop.mapreduce.QueueInfo;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskReport;
+import org.apache.hadoop.mapreduce.TaskTrackerInfo;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.tools.rumen.TaskInfo;
+import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo;
+import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo;
+import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.mapreduce.split.JobSplit.*;
+//
+// Mock jobtracker class that checks heartbeat() parameters and
+// sends responses based on a prepopulated table.
+//
+public class MockSimulatorJobTracker implements InterTrackerProtocol,
+ ClientProtocol {
+ private final long simulationStartTime;
+ private final int heartbeatInterval;
+
+  // Helper table, used iff checkHeartbeats == true.
+  // For each simulation time t it holds the expected task tracker status
+  // reports (keyed by tracker name) and the heartbeat responses to send.
+ private SortedMap<Long, TreeMap<String, HeartbeatHelper>> heartbeats =
+ new TreeMap<Long, TreeMap<String, HeartbeatHelper>>();
+ private final boolean checkHeartbeats;
+ private int jobId = 0;
+
+ static final Log LOG = LogFactory.getLog(MockSimulatorJobTracker.class);
+
+ public MockSimulatorJobTracker(long simulationStartTime,
+ int heartbeatInterval,
+ boolean checkHeartbeats) {
+ this.simulationStartTime = simulationStartTime;
+ this.heartbeatInterval = heartbeatInterval;
+ this.checkHeartbeats = checkHeartbeats;
+ }
+
+ @Override
+ public JobID getNewJobID() throws IOException {
+ return new JobID("mockJT", jobId++);
+ }
+
+ @Override
+ public JobStatus submitJob(
+ JobID jobId, String jobSubmitDir, Credentials ts) throws IOException {
+ JobStatus status = new JobStatus(jobId, 0.0f, 0.0f, 0.0f, 0.0f,
+ JobStatus.State.RUNNING, JobPriority.NORMAL, "", "", "", "");
+ return status;
+ }
+
+ @Override
+ public HeartbeatResponse heartbeat(TaskTrackerStatus status,
+ boolean restarted, boolean initialContact, boolean acceptNewTasks,
+ short responseId) throws IOException {
+ if (!(status instanceof SimulatorTaskTrackerStatus)) {
+ throw new IllegalArgumentException(
+ "Expecting SimulatorTaskTrackerStatus, actual status type "
+ + status.getClass());
+ }
+ SimulatorTaskTrackerStatus trackerStatus =
+ (SimulatorTaskTrackerStatus)status;
+ long now = trackerStatus.getCurrentSimulationTime();
+ String trackerName = status.getTrackerName();
+
+ LOG.debug("Received heartbeat() from trackerName=" + trackerName +
+ ", now=" + now);
+
+ HeartbeatResponse response = new HeartbeatResponse();
+ response.setHeartbeatInterval(heartbeatInterval);
+ response.setActions(new TaskTrackerAction[0]);
+
+ if (checkHeartbeats) {
+      Assert.assertFalse("Received a heartbeat but no more were expected",
+                         heartbeats.isEmpty());
+ long nextToCheck = heartbeats.firstKey();
+ // Missing heartbeat check
+ Assert.assertTrue(nextToCheck <= now);
+ if (nextToCheck < now) {
+ LOG.debug("Simulation time progressed, last checked heartbeat at=" +
+ nextToCheck + ", now=" + now + ". Checking if no " +
+ "required heartbeats were missed in the past");
+ SortedMap<String, HeartbeatHelper> previousHeartbeats =
+ heartbeats.get(nextToCheck);
+ Assert.assertNotNull(previousHeartbeats);
+ Assert.assertTrue(previousHeartbeats.isEmpty());
+ heartbeats.remove(nextToCheck);
+ nextToCheck = heartbeats.firstKey();
+ }
+ Assert.assertEquals("Heartbeat at the wrong time", nextToCheck, now);
+
+ SortedMap<String, HeartbeatHelper> currentHeartbeats =
+ heartbeats.get(now);
+ HeartbeatHelper currentHeartbeat = currentHeartbeats.get(trackerName);
+ Assert.assertNotNull("Unknown task tracker name=" + trackerName,
+ currentHeartbeat);
+ currentHeartbeats.remove(trackerName);
+
+ currentHeartbeat.checkHeartbeatParameters(status, acceptNewTasks);
+
+ response.setActions(currentHeartbeat.getTaskTrackerActions());
+ }
+ return response;
+ }
+
+ //
+ // Populates the mock jobtracker's helper & checker table with expected
+ // empty reports from the task trackers and empty task actions to perform
+ //
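+  // e.g., with simulationStartTime == 0 and heartbeatInterval == 10,
+  // expectEmptyHeartbeats("tt1", 3) expects empty reports at t = 0, 10, 20
+  // (illustrative values, not taken from this patch).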
+ public void expectEmptyHeartbeats(String taskTrackerName,
+ int numHeartbeats) {
+ long simulationTime = simulationStartTime;
+ for (int i=0; i<numHeartbeats; i++) {
+ TreeMap<String, HeartbeatHelper> hb = heartbeats.get(simulationTime);
+ if (hb == null) {
+ hb = new TreeMap<String, HeartbeatHelper>();
+ heartbeats.put(simulationTime, hb);
+ }
+ hb.put(taskTrackerName, new HeartbeatHelper());
+ simulationTime += heartbeatInterval;
+ }
+ }
+
+  // Fills in all the expected heartbeat parameters and the responses to
+  // return, corresponding to running a map task on a task tracker.
+  // Use killHeartbeat < 0 if the task is not killed.
+ public void runMapTask(String taskTrackerName, TaskAttemptID taskId,
+ long mapStart, long mapRuntime, long killHeartbeat) {
+ long mapDone = mapStart + mapRuntime;
+ long mapEndHeartbeat = nextHeartbeat(mapDone);
+    final boolean isKilled = (killHeartbeat >= 0);
+ if (isKilled) {
+ mapEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
+ }
+
+ LOG.debug("mapStart=" + mapStart + ", mapDone=" + mapDone +
+ ", mapEndHeartbeat=" + mapEndHeartbeat +
+ ", killHeartbeat=" + killHeartbeat);
+
+ final int numSlotsRequired = 1;
+ org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
+ org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
+ Task task = new MapTask("dummyjobfile", taskIdOldApi, 0, new TaskSplitIndex(),
+ numSlotsRequired);
+ // all byte counters are 0
+ TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
+ MapTaskAttemptInfo taskAttemptInfo =
+ new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);
+ TaskTrackerAction action =
+ new SimulatorLaunchTaskAction(task, taskAttemptInfo);
+ heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
+ if (isKilled) {
+ action = new KillTaskAction(taskIdOldApi);
+ heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
+ action);
+ }
+
+    for (long simulationTime = mapStart + heartbeatInterval;
+ simulationTime <= mapEndHeartbeat;
+ simulationTime += heartbeatInterval) {
+ State state = simulationTime < mapEndHeartbeat ?
+ State.RUNNING : State.SUCCEEDED;
+ if (simulationTime == mapEndHeartbeat && isKilled) {
+ state = State.KILLED;
+ }
+ MapTaskStatus mapStatus = new MapTaskStatus(
+ task.getTaskID(), 0.0f, 0, state, "", "", null, Phase.MAP, null);
+ heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
+ mapStatus);
+ }
+ }
+
+  // Fills in all the expected heartbeat parameters and the responses to
+  // return, corresponding to running a reduce task on a task tracker.
+  // Use killHeartbeat < 0 if the task is not killed.
+ public void runReduceTask(String taskTrackerName, TaskAttemptID taskId,
+ long reduceStart, long mapDoneDelay,
+ long reduceRuntime, long killHeartbeat) {
+ long mapDone = nextHeartbeat(reduceStart + mapDoneDelay);
+ long reduceDone = mapDone + reduceRuntime;
+ long reduceEndHeartbeat = nextHeartbeat(reduceDone);
+    final boolean isKilled = (killHeartbeat >= 0);
+ if (isKilled) {
+ reduceEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
+ }
+
+ LOG.debug("reduceStart=" + reduceStart + ", mapDone=" + mapDone +
+ ", reduceDone=" + reduceDone +
+ ", reduceEndHeartbeat=" + reduceEndHeartbeat +
+ ", killHeartbeat=" + killHeartbeat);
+
+ final int numSlotsRequired = 1;
+ org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
+ org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
+ Task task = new ReduceTask("dummyjobfile", taskIdOldApi, 0, 0,
+ numSlotsRequired);
+ // all byte counters are 0
+ TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
+ ReduceTaskAttemptInfo taskAttemptInfo =
+ new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 0, 0,
+ reduceRuntime);
+ TaskTrackerAction action =
+ new SimulatorLaunchTaskAction(task, taskAttemptInfo);
+ heartbeats.get(reduceStart).get(taskTrackerName).addTaskTrackerAction(
+ action);
+ if (!isKilled || mapDone < killHeartbeat) {
+ action = new AllMapsCompletedTaskAction(task.getTaskID());
+ heartbeats.get(mapDone).get(taskTrackerName).addTaskTrackerAction(
+ action);
+ }
+ if (isKilled) {
+ action = new KillTaskAction(taskIdOldApi);
+ heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
+ action);
+ }
+
+    for (long simulationTime = reduceStart + heartbeatInterval;
+ simulationTime <= reduceEndHeartbeat;
+ simulationTime += heartbeatInterval) {
+ State state = simulationTime < reduceEndHeartbeat ?
+ State.RUNNING : State.SUCCEEDED;
+ if (simulationTime == reduceEndHeartbeat && isKilled) {
+ state = State.KILLED;
+ }
+      // mapDone is when the all-maps-completed event is delivered
+ Phase phase = simulationTime <= mapDone ? Phase.SHUFFLE : Phase.REDUCE;
+ ReduceTaskStatus reduceStatus = new ReduceTaskStatus(
+ task.getTaskID(), 0.0f, 0, state, "", "", null, phase, null);
+ heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
+ reduceStatus);
+ }
+ }
+
+ // Should be called at the end of the simulation: Mock JT should have
+ // consumed all entries from the heartbeats table by that time
+ public void checkMissingHeartbeats() {
+ Assert.assertEquals(1, heartbeats.size());
+ long lastHeartbeat = heartbeats.firstKey();
+ Assert.assertTrue("Missing heartbeats, last heartbeat=" + lastHeartbeat,
+ heartbeats.get(lastHeartbeat).isEmpty());
+ }
+
+ // rounds up to the next heartbeat time
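+  // Worked example (illustrative values, not from this patch): with
+  // simulationStartTime == 0 and heartbeatInterval == 10,
+  // nextHeartbeat(35) == 40, nextHeartbeat(40) == 40, nextHeartbeat(41) == 50.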
+ public long nextHeartbeat(long time) {
+ long numHeartbeats = (long)Math.ceil(
+ (time - simulationStartTime)/(double)heartbeatInterval);
+ return simulationStartTime + numHeartbeats * heartbeatInterval;
+ }
+
+  // Rest of InterTrackerProtocol and ClientProtocol follows, unused in the
+  // simulation
+ @Override
+ public String getFilesystemName() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void reportTaskTrackerError(String taskTracker,
+ String errorClass,
+ String errorMessage) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid,
+ int fromEventId, int maxEvents) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getSystemDir() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getStagingAreaDir() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getBuildVersion() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getProtocolVersion(String protocol, long clientVersion) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public TaskCompletionEvent[] getTaskCompletionEvents(
+ org.apache.hadoop.mapred.JobID jobid, int fromEventId, int maxEvents)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public TaskTrackerInfo[] getActiveTrackers() throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public JobStatus[] getAllJobs() throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public QueueInfo[] getChildQueues(String queueName) throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterMetrics getClusterMetrics() throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Counters getJobCounters(JobID jobid) throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getJobHistoryDir() throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public JobStatus getJobStatus(JobID jobid) throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ public org.apache.hadoop.mapreduce.server.jobtracker.State getJobTrackerState()
+ throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public JobTrackerStatus getJobTrackerStatus() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public QueueInfo getQueue(String queueName) throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+  }
+
+ @Override
+ public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+  }
+
+ @Override
+ public QueueInfo[] getQueues() throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+  }
+
+ @Override
+ public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+  }
+
+ @Override
+ public AccessControlList getQueueAdmins(String queueName) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String[] getTaskDiagnostics(TaskAttemptID taskId) throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public TaskReport[] getTaskReports(JobID jobid, TaskType type)
+ throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getTaskTrackerExpiryInterval() throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void killJob(JobID jobid) throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean killTask(TaskAttemptID taskId, boolean shouldFail)
+ throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setJobPriority(JobID jobid, String priority) throws IOException,
+ InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+  public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
+      throws IOException, InterruptedException {
+ }
+
+ @Override
+ public Token<DelegationTokenIdentifier>
+ getDelegationToken(Text renewer) throws IOException, InterruptedException {
+ return null;
+ }
+
+ @Override
+  public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
+      throws IOException, InterruptedException {
+ return 0;
+ }
+
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ return ProtocolSignature.getProtocolSignature(
+ this, protocol, clientVersion, clientMethodsHash);
+ }
+}
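The mock above is used in two phases: a test first pre-populates the heartbeats table through expectEmptyHeartbeats(), runMapTask() and runReduceTask(), then lets the simulated task trackers drive heartbeat() against it, and finally calls checkMissingHeartbeats() to verify that every expected report arrived. A minimal, hypothetical sketch of that flow (not part of this patch; the tracker name, the timing values and the TaskAttemptID constructor arguments are illustrative assumptions):

    package org.apache.hadoop.mapred;

    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.TaskType;

    public class MockSimulatorJobTrackerSketch {
      public static void main(String[] args) {
        // simulationStartTime = 0, heartbeatInterval = 10, checkHeartbeats = true
        MockSimulatorJobTracker jt = new MockSimulatorJobTracker(0L, 10, true);

        // Expect empty status reports at simulation times 0, 10, 20, 30, 40.
        jt.expectEmptyHeartbeats("tracker_host1:9010", 5);

        // Launch a map task at t = 10 that runs for 25 time units; its
        // SUCCEEDED report is then expected at nextHeartbeat(35) == 40.
        // killHeartbeat < 0 means the task is never killed.
        TaskAttemptID taskId = new TaskAttemptID("mockJT", 0, TaskType.MAP, 0, 0);
        jt.runMapTask("tracker_host1:9010", taskId, 10L, 25L, -1L);

        // ... simulated task trackers would call jt.heartbeat(...) here ...

        // At the end of the run the expectation table must be fully drained.
        jt.checkMissingHeartbeats();
      }
    }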
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestRemoveIpsFromLoggedNetworkTopology.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestRemoveIpsFromLoggedNetworkTopology.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestRemoveIpsFromLoggedNetworkTopology.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestRemoveIpsFromLoggedNetworkTopology.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorDeterministicReplay.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorDeterministicReplay.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorDeterministicReplay.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorDeterministicReplay.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEndToEnd.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEndToEnd.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEndToEnd.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEndToEnd.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEngine.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEngine.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEngine.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEngine.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEventQueue.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEventQueue.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEventQueue.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEventQueue.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobClient.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobClient.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobClient.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobClient.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobTracker.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobTracker.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobTracker.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobTracker.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorSerialJobSubmission.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorSerialJobSubmission.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorSerialJobSubmission.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorSerialJobSubmission.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorStressJobSubmission.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorStressJobSubmission.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorStressJobSubmission.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorStressJobSubmission.java
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorTaskTracker.java b/hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorTaskTracker.java
similarity index 100%
rename from mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorTaskTracker.java
rename to hadoop-mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorTaskTracker.java
diff --git a/mapreduce/src/contrib/raid/README b/hadoop-mapreduce/src/contrib/raid/README
similarity index 100%
rename from mapreduce/src/contrib/raid/README
rename to hadoop-mapreduce/src/contrib/raid/README
diff --git a/mapreduce/src/contrib/raid/bin/start-raidnode-remote.sh b/hadoop-mapreduce/src/contrib/raid/bin/start-raidnode-remote.sh
similarity index 100%
rename from mapreduce/src/contrib/raid/bin/start-raidnode-remote.sh
rename to hadoop-mapreduce/src/contrib/raid/bin/start-raidnode-remote.sh
diff --git a/mapreduce/src/contrib/raid/bin/start-raidnode.sh b/hadoop-mapreduce/src/contrib/raid/bin/start-raidnode.sh
similarity index 100%
rename from mapreduce/src/contrib/raid/bin/start-raidnode.sh
rename to hadoop-mapreduce/src/contrib/raid/bin/start-raidnode.sh
diff --git a/mapreduce/src/contrib/raid/bin/stop-raidnode-remote.sh b/hadoop-mapreduce/src/contrib/raid/bin/stop-raidnode-remote.sh
similarity index 100%
rename from mapreduce/src/contrib/raid/bin/stop-raidnode-remote.sh
rename to hadoop-mapreduce/src/contrib/raid/bin/stop-raidnode-remote.sh
diff --git a/mapreduce/src/contrib/raid/bin/stop-raidnode.sh b/hadoop-mapreduce/src/contrib/raid/bin/stop-raidnode.sh
similarity index 100%
rename from mapreduce/src/contrib/raid/bin/stop-raidnode.sh
rename to hadoop-mapreduce/src/contrib/raid/bin/stop-raidnode.sh
diff --git a/mapreduce/src/contrib/raid/build.xml b/hadoop-mapreduce/src/contrib/raid/build.xml
similarity index 100%
rename from mapreduce/src/contrib/raid/build.xml
rename to hadoop-mapreduce/src/contrib/raid/build.xml
diff --git a/mapreduce/src/contrib/raid/conf/raid.xml b/hadoop-mapreduce/src/contrib/raid/conf/raid.xml
similarity index 100%
rename from mapreduce/src/contrib/raid/conf/raid.xml
rename to hadoop-mapreduce/src/contrib/raid/conf/raid.xml
diff --git a/hadoop-mapreduce/src/contrib/raid/ivy.xml b/hadoop-mapreduce/src/contrib/raid/ivy.xml
new file mode 100644
index 0000000..5b8cfd8
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/raid/ivy.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+    <description>Raid</description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private" extends="runtime"
+ description="artifacts needed to compile/test the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-common"
+ rev="${hadoop-common.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-common-test"
+ rev="${hadoop-common.version}"
+ conf="test->default"/>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-hdfs"
+ rev="${hadoop-hdfs.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-hdfs-test"
+ rev="${hadoop-hdfs.version}"
+ conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+
+ <dependency org="commons-logging"
+ name="commons-logging"
+ rev="${commons-logging.version}"
+ conf="common->default"/>
+ <dependency org="log4j"
+ name="log4j"
+ rev="${log4j.version}"
+ conf="common->master"/>
+ <dependency org="junit"
+ name="junit"
+ rev="${junit.version}"
+ conf="common->default"/>
+
+ <!-- necessary for Mini*Clusters -->
+ <dependency org="commons-httpclient"
+ name="commons-httpclient"
+ rev="${commons-httpclient.version}"
+ conf="common->master"/>
+ <dependency org="commons-codec"
+ name="commons-codec"
+ rev="${commons-codec.version}"
+ conf="common->default"/>
+ <dependency org="commons-net"
+ name="commons-net"
+ rev="${commons-net.version}"
+ conf="common->default"/>
+ <dependency org="org.mortbay.jetty"
+ name="jetty"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jetty-util"
+ rev="${jetty-util.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jsp-api-2.1"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jsp-2.1"
+ rev="${jetty.version}"
+ conf="common->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="servlet-api-2.5"
+ rev="${servlet-api-2.5.version}"
+ conf="common->master"/>
+ <dependency org="commons-cli"
+ name="commons-cli"
+ rev="${commons-cli.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.avro"
+ name="avro"
+ rev="${avro.version}"
+ conf="common->default">
+ <exclude module="ant"/>
+ <exclude module="jetty"/>
+ <exclude module="slf4j-simple"/>
+ </dependency>
+ <dependency org="org.codehaus.jackson"
+ name="jackson-mapper-asl"
+ rev="${jackson.version}"
+ conf="common->default"/>
+ <dependency org="org.codehaus.jackson"
+ name="jackson-core-asl"
+ rev="${jackson.version}"
+ conf="common->default"/>
+ <dependency org="com.thoughtworks.paranamer"
+ name="paranamer"
+ rev="${paranamer.version}"
+ conf="common->default"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
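The ${...} revision placeholders above are Ivy/Ant property references; each contrib typically resolves them from its ivy/libraries.properties file (renamed for raid just below). A hypothetical excerpt, with purely illustrative version values:

    # Illustrative entries only; the real values live in
    # src/contrib/raid/ivy/libraries.properties.
    hadoop-common.version=0.23.0-SNAPSHOT
    hadoop-hdfs.version=0.23.0-SNAPSHOT
    yarn.version=1.0-SNAPSHOT
    log4j.version=1.2.15
    junit.version=4.8.1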
diff --git a/mapreduce/src/contrib/raid/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/raid/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/raid/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/raid/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/RaidDFSUtil.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/RaidDFSUtil.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/RaidDFSUtil.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/RaidDFSUtil.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ConfigManager.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ConfigManager.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ConfigManager.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ConfigManager.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/Decoder.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/Decoder.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/Decoder.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/Decoder.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DirectoryTraversal.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DirectoryTraversal.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DirectoryTraversal.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DirectoryTraversal.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistBlockFixer.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistBlockFixer.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistBlockFixer.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistBlockFixer.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaidNode.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaidNode.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaidNode.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaidNode.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/Encoder.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/Encoder.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/Encoder.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/Encoder.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ErasureCode.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ErasureCode.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ErasureCode.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ErasureCode.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/GaloisField.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/GaloisField.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/GaloisField.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/GaloisField.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/HarIndex.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/HarIndex.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/HarIndex.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/HarIndex.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/JobMonitor.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/JobMonitor.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/JobMonitor.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/JobMonitor.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalBlockFixer.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalBlockFixer.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalBlockFixer.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalBlockFixer.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalRaidNode.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalRaidNode.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalRaidNode.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalRaidNode.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ParityInputStream.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ParityInputStream.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ParityInputStream.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ParityInputStream.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidConfigurationException.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidConfigurationException.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidConfigurationException.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidConfigurationException.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidFilter.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidFilter.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidFilter.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidFilter.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidUtils.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidUtils.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidUtils.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidUtils.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonCode.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonCode.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonCode.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonCode.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonDecoder.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonDecoder.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonDecoder.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonDecoder.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonEncoder.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonEncoder.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonEncoder.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonEncoder.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/XORDecoder.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/XORDecoder.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/XORDecoder.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/XORDecoder.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/XOREncoder.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/XOREncoder.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/XOREncoder.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/XOREncoder.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyInfo.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyInfo.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyInfo.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyInfo.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyList.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyList.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyList.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyList.java
diff --git a/mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/RaidProtocol.java b/hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/RaidProtocol.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/RaidProtocol.java
rename to hadoop-mapreduce/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/RaidProtocol.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/TestRaidDfs.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/TestRaidDfs.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/TestRaidDfs.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/TestRaidDfs.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixer.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixer.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixer.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixer.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestDirectoryTraversal.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestDirectoryTraversal.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestDirectoryTraversal.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestDirectoryTraversal.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestErasureCodes.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestErasureCodes.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestErasureCodes.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestErasureCodes.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestGaloisField.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestGaloisField.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestGaloisField.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestGaloisField.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestHarIndexParser.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestHarIndexParser.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestHarIndexParser.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestHarIndexParser.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidFilter.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidFilter.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidFilter.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidFilter.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidHar.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidHar.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidHar.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidHar.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShell.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShell.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShell.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShell.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShellFsck.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShellFsck.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShellFsck.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShellFsck.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonDecoder.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonDecoder.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonDecoder.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonDecoder.java
diff --git a/mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonEncoder.java b/hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonEncoder.java
similarity index 100%
rename from mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonEncoder.java
rename to hadoop-mapreduce/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonEncoder.java
diff --git a/mapreduce/src/contrib/streaming/build.xml b/hadoop-mapreduce/src/contrib/streaming/build.xml
similarity index 100%
rename from mapreduce/src/contrib/streaming/build.xml
rename to hadoop-mapreduce/src/contrib/streaming/build.xml
diff --git a/hadoop-mapreduce/src/contrib/streaming/ivy.xml b/hadoop-mapreduce/src/contrib/streaming/ivy.xml
new file mode 100644
index 0000000..eacd377
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/streaming/ivy.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Apache Hadoop
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private"
+ extends="runtime"
+ description="artifacts needed to compile/test the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common"
+ rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common-test"
+ rev="${hadoop-common.version}" conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs"
+ rev="${hadoop-hdfs.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
+ rev="${hadoop-hdfs.version}" conf="test->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+
+ <dependency org="commons-cli" name="commons-cli"
+ rev="${commons-cli.version}" conf="common->default"/>
+ <dependency org="commons-logging" name="commons-logging"
+ rev="${commons-logging.version}" conf="common->default"/>
+ <dependency org="junit" name="junit"
+ rev="${junit.version}" conf="common->default"/>
+ <dependency org="org.mortbay.jetty" name="jetty-util"
+ rev="${jetty-util.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="jetty"
+ rev="${jetty.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="jsp-api-2.1"
+ rev="${jetty.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="jsp-2.1"
+ rev="${jetty.version}" conf="common->master"/>
+ <dependency org="org.mortbay.jetty" name="servlet-api-2.5"
+ rev="${servlet-api-2.5.version}" conf="common->master"/>
+ <dependency org="commons-httpclient" name="commons-httpclient"
+ rev="${commons-httpclient.version}" conf="common->default"/>
+ <dependency org="log4j" name="log4j"
+ rev="${log4j.version}" conf="common->master"/>
+ <dependency org="org.apache.avro" name="avro"
+ rev="${avro.version}" conf="common->default">
+ <exclude module="ant"/>
+ <exclude module="jetty"/>
+ <exclude module="slf4j-simple"/>
+ </dependency>
+ <dependency org="org.slf4j" name="slf4j-api"
+ rev="${slf4j-api.version}" conf="common->master"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
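The new streaming ivy.xml above is consumed by the module's Ant build: the contrib build resolves the private "common" configuration and turns the result into a compile classpath. A minimal sketch of that wiring, assuming Ivy's Ant tasks are available to Ant (the project name, target name, and path id here are illustrative, not the actual names used by the contrib build files):

<project name="streaming-deps-example" xmlns:ivy="antlib:org.apache.ivy.ant">
  <!-- Resolve the "common" configuration declared in ivy.xml above. -->
  <target name="resolve-common">
    <ivy:resolve file="ivy.xml" conf="common"/>
    <!-- Materialize the resolved jars as an Ant path usable by javac. -->
    <ivy:cachepath pathid="contrib.common.classpath" conf="common"/>
  </target>
</project>

With conf="common->default" a dependency such as hadoop-common is fetched together with its runtime transitive dependencies, while conf="common->master" (used above for the jetty artifacts and log4j) fetches only the artifact itself.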
diff --git a/mapreduce/src/contrib/streaming/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/streaming/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/streaming/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/streaming/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/AutoInputFormat.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/AutoInputFormat.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/AutoInputFormat.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/AutoInputFormat.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/DumpTypedBytes.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/DumpTypedBytes.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/DumpTypedBytes.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/DumpTypedBytes.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/HadoopStreaming.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/HadoopStreaming.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/HadoopStreaming.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/HadoopStreaming.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/LoadTypedBytes.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/LoadTypedBytes.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/LoadTypedBytes.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/LoadTypedBytes.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRunner.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRunner.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRunner.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRunner.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamKeyValUtil.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamKeyValUtil.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamKeyValUtil.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamKeyValUtil.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/IdentifierResolver.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/IdentifierResolver.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/IdentifierResolver.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/IdentifierResolver.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/InputWriter.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/InputWriter.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/InputWriter.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/InputWriter.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/OutputReader.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/OutputReader.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/OutputReader.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/OutputReader.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesInputWriter.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesInputWriter.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesInputWriter.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesInputWriter.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesOutputReader.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesOutputReader.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesOutputReader.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesOutputReader.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextInputWriter.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextInputWriter.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextInputWriter.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextInputWriter.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextOutputReader.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextOutputReader.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextOutputReader.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextOutputReader.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesInputWriter.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesInputWriter.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesInputWriter.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesInputWriter.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesOutputReader.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesOutputReader.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesOutputReader.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesOutputReader.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/package.html b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/package.html
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/package.html
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/streaming/package.html
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/Type.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/Type.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/Type.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/Type.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesInput.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesInput.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesInput.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesInput.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesOutput.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesOutput.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesOutput.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesOutput.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordInput.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordInput.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordInput.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordInput.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordOutput.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordOutput.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordOutput.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordOutput.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritable.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritable.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritable.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritable.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableInput.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableInput.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableInput.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableInput.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableOutput.java b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableOutput.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableOutput.java
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableOutput.java
diff --git a/mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/package.html b/hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/package.html
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/package.html
rename to hadoop-mapreduce/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/package.html
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/DelayEchoApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/DelayEchoApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/DelayEchoApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/DelayEchoApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/FailApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/FailApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/FailApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/FailApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesMapApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesMapApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesMapApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesMapApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesReduceApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesReduceApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesReduceApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesReduceApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StreamAggregate.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StreamAggregate.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StreamAggregate.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StreamAggregate.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestAutoInputFormat.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestAutoInputFormat.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestAutoInputFormat.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestAutoInputFormat.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestClassWithNoPackage.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestClassWithNoPackage.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestClassWithNoPackage.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestClassWithNoPackage.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestDumpTypedBytes.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestDumpTypedBytes.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestDumpTypedBytes.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestDumpTypedBytes.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestFileArgs.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestFileArgs.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestFileArgs.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestFileArgs.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestLoadTypedBytes.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestLoadTypedBytes.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestLoadTypedBytes.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestLoadTypedBytes.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestRawBytesStreaming.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestRawBytesStreaming.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestRawBytesStreaming.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestRawBytesStreaming.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamJob.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamJob.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamJob.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamJob.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlMultipleRecords.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlMultipleRecords.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlMultipleRecords.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlMultipleRecords.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBackground.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBackground.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBackground.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBackground.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBadRecords.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBadRecords.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBadRecords.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBadRecords.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCombiner.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCombiner.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCombiner.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCombiner.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCounters.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCounters.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCounters.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCounters.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingFailure.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingFailure.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingFailure.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingFailure.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingOutputKeyValueTypes.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingOutputKeyValueTypes.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingOutputKeyValueTypes.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingOutputKeyValueTypes.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingSeparator.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingSeparator.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingSeparator.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingSeparator.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingTaskLog.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingTaskLog.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingTaskLog.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingTaskLog.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestTypedBytesStreaming.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestTypedBytesStreaming.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestTypedBytesStreaming.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestTypedBytesStreaming.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrAppReduce.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrAppReduce.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrAppReduce.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrAppReduce.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesMapApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesMapApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesMapApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesMapApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesReduceApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesReduceApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesReduceApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesReduceApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UlimitApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UlimitApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UlimitApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UlimitApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/ValueCountReduce.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/ValueCountReduce.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/ValueCountReduce.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/streaming/ValueCountReduce.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestIO.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestIO.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestIO.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestIO.java
diff --git a/mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java b/hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
similarity index 100%
rename from mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
rename to hadoop-mapreduce/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
diff --git a/mapreduce/src/contrib/vaidya/build.xml b/hadoop-mapreduce/src/contrib/vaidya/build.xml
similarity index 100%
rename from mapreduce/src/contrib/vaidya/build.xml
rename to hadoop-mapreduce/src/contrib/vaidya/build.xml
diff --git a/hadoop-mapreduce/src/contrib/vaidya/ivy.xml b/hadoop-mapreduce/src/contrib/vaidya/ivy.xml
new file mode 100644
index 0000000..dbfb5cf
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/vaidya/ivy.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Apache Hadoop
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private"
+ extends="runtime"
+ description="artifacts needed to compile the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
+ <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
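As in the other contrib modules, every rev in this file is a property placeholder rather than a literal version; the values are supplied by the module's ivy/libraries.properties (renamed just below) together with the shared build properties. A hedged illustration of the kind of definitions being substituted, expressed as Ant properties with hypothetical version numbers:

<project name="vaidya-versions-example">
  <!-- Normally loaded from ivy/libraries.properties; the values here are
       placeholders for illustration only. -->
  <property name="hadoop-common.version" value="0.23.0-SNAPSHOT"/>
  <property name="yarn.version" value="1.0-SNAPSHOT"/>
  <property name="log4j.version" value="1.2.15"/>
  <property name="commons-logging.version" value="1.1.1"/>
</project>

At resolve time Ivy expands ${hadoop-common.version} and friends from these properties, so bumping a dependency is a one-line properties change rather than an edit to each module's ivy.xml.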
diff --git a/mapreduce/src/contrib/vaidya/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/vaidya/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/vaidya/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/vaidya/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/DiagnosticTest.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/DiagnosticTest.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/DiagnosticTest.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/DiagnosticTest.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/JobDiagnoser.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/JobDiagnoser.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/JobDiagnoser.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/JobDiagnoser.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/PostExPerformanceDiagnoser.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/PostExPerformanceDiagnoser.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/PostExPerformanceDiagnoser.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/PostExPerformanceDiagnoser.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/BalancedReducePartitioning.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/BalancedReducePartitioning.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/BalancedReducePartitioning.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/BalancedReducePartitioning.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapSideDiskSpill.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapSideDiskSpill.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapSideDiskSpill.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapSideDiskSpill.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapsReExecutionImpact.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapsReExecutionImpact.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapsReExecutionImpact.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapsReExecutionImpact.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReducesReExecutionImpact.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReducesReExecutionImpact.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReducesReExecutionImpact.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReducesReExecutionImpact.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/postex_diagnosis_tests.xml b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/postex_diagnosis_tests.xml
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/postex_diagnosis_tests.xml
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/postex_diagnosis_tests.xml
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/MapTaskStatistics.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/MapTaskStatistics.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/MapTaskStatistics.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/MapTaskStatistics.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/ReduceTaskStatistics.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/ReduceTaskStatistics.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/ReduceTaskStatistics.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/ReduceTaskStatistics.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/TaskStatistics.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/TaskStatistics.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/TaskStatistics.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/TaskStatistics.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/util/XMLUtils.java b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/util/XMLUtils.java
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/util/XMLUtils.java
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/util/XMLUtils.java
diff --git a/mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh b/hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh
similarity index 100%
rename from mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh
rename to hadoop-mapreduce/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh
diff --git a/mapreduce/src/contrib/vertica/build.xml b/hadoop-mapreduce/src/contrib/vertica/build.xml
similarity index 100%
rename from mapreduce/src/contrib/vertica/build.xml
rename to hadoop-mapreduce/src/contrib/vertica/build.xml
diff --git a/hadoop-mapreduce/src/contrib/vertica/ivy.xml b/hadoop-mapreduce/src/contrib/vertica/ivy.xml
new file mode 100644
index 0000000..b93a0c0
--- /dev/null
+++ b/hadoop-mapreduce/src/contrib/vertica/ivy.xml
@@ -0,0 +1,81 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Apache Hadoop
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact" />
+
+ <conf name="common" visibility="private"
+ extends="runtime"
+ description="artifacts needed to compile/test the application"/>
+ <conf name="test" visibility="private" extends="runtime"/>
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+ <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop"
+ name="hadoop-common"
+ rev="${hadoop-common.version}"
+ conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
+ rev="${yarn.version}" conf="common->default"/>
+ <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
+ rev="${yarn.version}" conf="common->default"/>
+
+ <dependency org="commons-logging"
+ name="commons-logging"
+ rev="${commons-logging.version}"
+ conf="common->default"/>
+ <dependency org="commons-httpclient"
+ name="commons-httpclient"
+ rev="${commons-httpclient.version}"
+ conf="common->default"/>
+ <dependency org="commons-cli"
+ name="commons-cli"
+ rev="${commons-cli.version}"
+ conf="common->default"/>
+ <dependency org="junit"
+ name="junit"
+ rev="${junit.version}"
+ conf="common->default"/>
+ <dependency org="log4j"
+ name="log4j"
+ rev="${log4j.version}"
+ conf="common->master"/>
+
+ <!-- Exclusions for transitive dependencies pulled in by log4j -->
+ <exclude org="com.sun.jdmk"/>
+ <exclude org="com.sun.jmx"/>
+ <exclude org="javax.jms"/>
+ <exclude org="javax.mail"/>
+
+ </dependencies>
+</ivy-module>
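The global <exclude> block repeated at the bottom of these ivy.xml files works around a well-known defect in log4j 1.2.x metadata: its published POM declares transitive dependencies on com.sun.jdmk, com.sun.jmx, javax.jms, and javax.mail artifacts that are not freely resolvable from public repositories, so any module that pulls in log4j must filter them out. Ivy also accepts the same filtering on an individual dependency instead of module-wide; a sketch of that equivalent form (for illustration only, the files above use the module-wide variant):

<dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master">
  <!-- Drop log4j's unresolvable transitive dependencies at the source. -->
  <exclude org="com.sun.jdmk"/>
  <exclude org="com.sun.jmx"/>
  <exclude org="javax.jms"/>
  <exclude org="javax.mail"/>
</dependency>

Even though log4j itself is taken with conf="common->master" here (so no transitives are pulled through it directly), the module-wide excludes still matter because log4j also arrives transitively through the hadoop-* dependencies resolved with conf="common->default".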
diff --git a/mapreduce/src/contrib/vertica/ivy/libraries.properties b/hadoop-mapreduce/src/contrib/vertica/ivy/libraries.properties
similarity index 100%
rename from mapreduce/src/contrib/vertica/ivy/libraries.properties
rename to hadoop-mapreduce/src/contrib/vertica/ivy/libraries.properties
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaConfiguration.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaConfiguration.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaConfiguration.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaConfiguration.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaInputFormat.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaInputFormat.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaInputFormat.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaInputFormat.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaInputSplit.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaInputSplit.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaInputSplit.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaInputSplit.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaOutputFormat.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaOutputFormat.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaOutputFormat.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaOutputFormat.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecord.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecord.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecord.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecord.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecordReader.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecordReader.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecordReader.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecordReader.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecordWriter.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecordWriter.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecordWriter.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaRecordWriter.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingInput.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingInput.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingInput.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingInput.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingOutput.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingOutput.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingOutput.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingOutput.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingRecordReader.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingRecordReader.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingRecordReader.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingRecordReader.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingRecordWriter.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingRecordWriter.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingRecordWriter.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaStreamingRecordWriter.java
diff --git a/mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaUtil.java b/hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaUtil.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaUtil.java
rename to hadoop-mapreduce/src/contrib/vertica/src/java/org/apache/hadoop/vertica/VerticaUtil.java
diff --git a/mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/AllTests.java b/hadoop-mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/AllTests.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/AllTests.java
rename to hadoop-mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/AllTests.java
diff --git a/mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/TestExample.java b/hadoop-mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/TestExample.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/TestExample.java
rename to hadoop-mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/TestExample.java
diff --git a/mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/TestVertica.java b/hadoop-mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/TestVertica.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/TestVertica.java
rename to hadoop-mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/TestVertica.java
diff --git a/mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/VerticaTestCase.java b/hadoop-mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/VerticaTestCase.java
similarity index 100%
rename from mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/VerticaTestCase.java
rename to hadoop-mapreduce/src/contrib/vertica/src/test/org/apache/hadoop/vertica/VerticaTestCase.java
diff --git a/mapreduce/src/contrib/vertica/testdata/vertica_test.sql b/hadoop-mapreduce/src/contrib/vertica/testdata/vertica_test.sql
similarity index 100%
rename from mapreduce/src/contrib/vertica/testdata/vertica_test.sql
rename to hadoop-mapreduce/src/contrib/vertica/testdata/vertica_test.sql
diff --git a/mapreduce/src/docs/changes/ChangesFancyStyle.css b/hadoop-mapreduce/src/docs/changes/ChangesFancyStyle.css
similarity index 100%
rename from mapreduce/src/docs/changes/ChangesFancyStyle.css
rename to hadoop-mapreduce/src/docs/changes/ChangesFancyStyle.css
diff --git a/mapreduce/src/docs/changes/ChangesSimpleStyle.css b/hadoop-mapreduce/src/docs/changes/ChangesSimpleStyle.css
similarity index 100%
rename from mapreduce/src/docs/changes/ChangesSimpleStyle.css
rename to hadoop-mapreduce/src/docs/changes/ChangesSimpleStyle.css
diff --git a/mapreduce/src/docs/changes/changes2html.pl b/hadoop-mapreduce/src/docs/changes/changes2html.pl
similarity index 100%
rename from mapreduce/src/docs/changes/changes2html.pl
rename to hadoop-mapreduce/src/docs/changes/changes2html.pl
diff --git a/mapreduce/src/docs/forrest.properties b/hadoop-mapreduce/src/docs/forrest.properties
similarity index 100%
rename from mapreduce/src/docs/forrest.properties
rename to hadoop-mapreduce/src/docs/forrest.properties
diff --git a/mapreduce/src/docs/releasenotes.html b/hadoop-mapreduce/src/docs/releasenotes.html
similarity index 100%
rename from mapreduce/src/docs/releasenotes.html
rename to hadoop-mapreduce/src/docs/releasenotes.html
diff --git a/mapreduce/src/docs/src/documentation/README.txt b/hadoop-mapreduce/src/docs/src/documentation/README.txt
similarity index 100%
rename from mapreduce/src/docs/src/documentation/README.txt
rename to hadoop-mapreduce/src/docs/src/documentation/README.txt
diff --git a/mapreduce/src/docs/src/documentation/classes/CatalogManager.properties b/hadoop-mapreduce/src/docs/src/documentation/classes/CatalogManager.properties
similarity index 100%
rename from mapreduce/src/docs/src/documentation/classes/CatalogManager.properties
rename to hadoop-mapreduce/src/docs/src/documentation/classes/CatalogManager.properties
diff --git a/mapreduce/src/docs/src/documentation/conf/cli.xconf b/hadoop-mapreduce/src/docs/src/documentation/conf/cli.xconf
similarity index 100%
rename from mapreduce/src/docs/src/documentation/conf/cli.xconf
rename to hadoop-mapreduce/src/docs/src/documentation/conf/cli.xconf
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/distcp.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/distcp.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/distcp.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/distcp.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/dynamic_scheduler.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/dynamic_scheduler.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/dynamic_scheduler.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/dynamic_scheduler.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/fair_scheduler.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/fair_scheduler.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/fair_scheduler.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/fair_scheduler.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/gridmix.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/gridmix.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/gridmix.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/gridmix.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/hadoop_archives.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/hadoop_archives.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/hadoop_archives.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/hadoop_archives.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/index.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/index.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/index.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/index.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/rumen.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/rumen.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/rumen.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/rumen.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/site.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/site.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/site.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/site.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/streaming.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/streaming.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/streaming.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/streaming.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/tabs.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/tabs.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/tabs.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/tabs.xml
diff --git a/mapreduce/src/docs/src/documentation/content/xdocs/vaidya.xml b/hadoop-mapreduce/src/docs/src/documentation/content/xdocs/vaidya.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/content/xdocs/vaidya.xml
rename to hadoop-mapreduce/src/docs/src/documentation/content/xdocs/vaidya.xml
diff --git a/mapreduce/src/docs/src/documentation/resources/images/architecture.gif b/hadoop-mapreduce/src/docs/src/documentation/resources/images/architecture.gif
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/architecture.gif
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/architecture.gif
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/core-logo.gif b/hadoop-mapreduce/src/docs/src/documentation/resources/images/core-logo.gif
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/core-logo.gif
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/core-logo.gif
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/favicon.ico b/hadoop-mapreduce/src/docs/src/documentation/resources/images/favicon.ico
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/favicon.ico
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/favicon.ico
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/hadoop-logo-big.jpg b/hadoop-mapreduce/src/docs/src/documentation/resources/images/hadoop-logo-big.jpg
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/hadoop-logo-big.jpg
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/hadoop-logo-big.jpg
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/hadoop-logo.jpg b/hadoop-mapreduce/src/docs/src/documentation/resources/images/hadoop-logo.jpg
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/hadoop-logo.jpg
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/hadoop-logo.jpg
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.gif b/hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.gif
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.gif
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.gif
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.odg b/hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.odg
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.odg
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.odg
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.png b/hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.png
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.png
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsarchitecture.png
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.gif b/hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.gif
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.gif
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.gif
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.odg b/hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.odg
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.odg
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.odg
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.png b/hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.png
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.png
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/hdfsdatanodes.png
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/resources/images/mapreduce-logo.jpg b/hadoop-mapreduce/src/docs/src/documentation/resources/images/mapreduce-logo.jpg
similarity index 100%
rename from mapreduce/src/docs/src/documentation/resources/images/mapreduce-logo.jpg
rename to hadoop-mapreduce/src/docs/src/documentation/resources/images/mapreduce-logo.jpg
Binary files differ
diff --git a/mapreduce/src/docs/src/documentation/skinconf.xml b/hadoop-mapreduce/src/docs/src/documentation/skinconf.xml
similarity index 100%
rename from mapreduce/src/docs/src/documentation/skinconf.xml
rename to hadoop-mapreduce/src/docs/src/documentation/skinconf.xml
diff --git a/mapreduce/src/docs/status.xml b/hadoop-mapreduce/src/docs/status.xml
similarity index 100%
rename from mapreduce/src/docs/status.xml
rename to hadoop-mapreduce/src/docs/status.xml
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/AggregateWordCount.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/AggregateWordCount.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/AggregateWordCount.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/AggregateWordCount.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/BaileyBorweinPlouffe.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/DBCountPageView.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/DBCountPageView.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/DBCountPageView.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/DBCountPageView.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/ExampleDriver.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/ExampleDriver.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/ExampleDriver.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/ExampleDriver.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/Grep.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/Grep.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/Grep.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/Grep.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/Join.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/Join.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/Join.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/Join.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/QuasiMonteCarlo.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/QuasiMonteCarlo.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/QuasiMonteCarlo.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/QuasiMonteCarlo.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/RandomTextWriter.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/RandomTextWriter.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/RandomTextWriter.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/RandomTextWriter.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/RandomWriter.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/RandomWriter.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/RandomWriter.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/RandomWriter.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/SecondarySort.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/SecondarySort.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/SecondarySort.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/SecondarySort.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/Sort.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/Sort.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/Sort.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/Sort.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/WordCount.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/WordCount.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/WordCount.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/WordCount.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/dancing/Pentomino.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/Pentomino.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/dancing/Pentomino.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/Pentomino.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/dancing/Sudoku.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/Sudoku.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/dancing/Sudoku.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/Sudoku.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/dancing/package.html b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/package.html
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/dancing/package.html
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/package.html
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/package.html b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/package.html
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/package.html
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/package.html
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/Combinable.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/Combinable.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/Combinable.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/Combinable.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/Container.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/Container.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/Container.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/Container.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/DistBbp.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/DistBbp.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/DistBbp.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/DistBbp.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/DistSum.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/DistSum.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/DistSum.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/DistSum.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/Parser.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/Parser.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/Parser.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/Parser.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/SummationWritable.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/SummationWritable.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/SummationWritable.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/SummationWritable.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/TaskResult.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/TaskResult.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/TaskResult.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/TaskResult.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/Util.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/Util.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/Util.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/Util.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/math/ArithmeticProgression.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/ArithmeticProgression.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/math/ArithmeticProgression.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/ArithmeticProgression.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Bellard.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Bellard.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Bellard.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Bellard.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/math/LongLong.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/LongLong.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/math/LongLong.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/LongLong.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Modular.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Modular.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Modular.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Modular.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Montgomery.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Montgomery.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Montgomery.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Montgomery.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Summation.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Summation.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Summation.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/Summation.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/math/package.html b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/package.html
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/math/package.html
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/math/package.html
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/pi/package.html b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/package.html
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/pi/package.html
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/pi/package.html
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/.gitignore b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/.gitignore
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/.gitignore
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/.gitignore
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/100TBTaskTime.png b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/100TBTaskTime.png
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/100TBTaskTime.png
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/100TBTaskTime.png
Binary files differ
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1PBTaskTime.png b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1PBTaskTime.png
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1PBTaskTime.png
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1PBTaskTime.png
Binary files differ
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1TBTaskTime.png b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1TBTaskTime.png
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1TBTaskTime.png
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1TBTaskTime.png
Binary files differ
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/500GBTaskTime.png b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/500GBTaskTime.png
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/500GBTaskTime.png
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/500GBTaskTime.png
Binary files differ
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/Yahoo2009.tex b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/Yahoo2009.tex
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/Yahoo2009.tex
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/Yahoo2009.tex
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/tera.bib b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/tera.bib
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/tera.bib
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/tera.bib
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/GenSort.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/GenSort.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/GenSort.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/GenSort.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/Random16.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/Random16.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/Random16.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/Random16.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraChecksum.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraChecksum.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraChecksum.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraChecksum.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraScheduler.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraScheduler.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraScheduler.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraScheduler.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/Unsigned16.java b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/Unsigned16.java
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/Unsigned16.java
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/Unsigned16.java
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py
diff --git a/mapreduce/src/examples/org/apache/hadoop/examples/terasort/package.html b/hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/package.html
similarity index 100%
rename from mapreduce/src/examples/org/apache/hadoop/examples/terasort/package.html
rename to hadoop-mapreduce/src/examples/org/apache/hadoop/examples/terasort/package.html
diff --git a/mapreduce/src/examples/pipes/.autom4te.cfg b/hadoop-mapreduce/src/examples/pipes/.autom4te.cfg
similarity index 100%
rename from mapreduce/src/examples/pipes/.autom4te.cfg
rename to hadoop-mapreduce/src/examples/pipes/.autom4te.cfg
diff --git a/mapreduce/src/examples/pipes/Makefile.am b/hadoop-mapreduce/src/examples/pipes/Makefile.am
similarity index 100%
rename from mapreduce/src/examples/pipes/Makefile.am
rename to hadoop-mapreduce/src/examples/pipes/Makefile.am
diff --git a/mapreduce/src/examples/pipes/README.txt b/hadoop-mapreduce/src/examples/pipes/README.txt
similarity index 100%
rename from mapreduce/src/examples/pipes/README.txt
rename to hadoop-mapreduce/src/examples/pipes/README.txt
diff --git a/mapreduce/src/examples/pipes/conf/word-part.xml b/hadoop-mapreduce/src/examples/pipes/conf/word-part.xml
similarity index 100%
rename from mapreduce/src/examples/pipes/conf/word-part.xml
rename to hadoop-mapreduce/src/examples/pipes/conf/word-part.xml
diff --git a/mapreduce/src/examples/pipes/conf/word.xml b/hadoop-mapreduce/src/examples/pipes/conf/word.xml
similarity index 100%
rename from mapreduce/src/examples/pipes/conf/word.xml
rename to hadoop-mapreduce/src/examples/pipes/conf/word.xml
diff --git a/mapreduce/src/examples/pipes/configure.ac b/hadoop-mapreduce/src/examples/pipes/configure.ac
similarity index 100%
rename from mapreduce/src/examples/pipes/configure.ac
rename to hadoop-mapreduce/src/examples/pipes/configure.ac
diff --git a/mapreduce/src/examples/pipes/impl/sort.cc b/hadoop-mapreduce/src/examples/pipes/impl/sort.cc
similarity index 100%
rename from mapreduce/src/examples/pipes/impl/sort.cc
rename to hadoop-mapreduce/src/examples/pipes/impl/sort.cc
diff --git a/mapreduce/src/examples/pipes/impl/wordcount-nopipe.cc b/hadoop-mapreduce/src/examples/pipes/impl/wordcount-nopipe.cc
similarity index 100%
rename from mapreduce/src/examples/pipes/impl/wordcount-nopipe.cc
rename to hadoop-mapreduce/src/examples/pipes/impl/wordcount-nopipe.cc
diff --git a/mapreduce/src/examples/pipes/impl/wordcount-part.cc b/hadoop-mapreduce/src/examples/pipes/impl/wordcount-part.cc
similarity index 100%
rename from mapreduce/src/examples/pipes/impl/wordcount-part.cc
rename to hadoop-mapreduce/src/examples/pipes/impl/wordcount-part.cc
diff --git a/mapreduce/src/examples/pipes/impl/wordcount-simple.cc b/hadoop-mapreduce/src/examples/pipes/impl/wordcount-simple.cc
similarity index 100%
rename from mapreduce/src/examples/pipes/impl/wordcount-simple.cc
rename to hadoop-mapreduce/src/examples/pipes/impl/wordcount-simple.cc
diff --git a/mapreduce/src/examples/python/WordCount.py b/hadoop-mapreduce/src/examples/python/WordCount.py
similarity index 100%
rename from mapreduce/src/examples/python/WordCount.py
rename to hadoop-mapreduce/src/examples/python/WordCount.py
diff --git a/mapreduce/src/examples/python/compile b/hadoop-mapreduce/src/examples/python/compile
similarity index 100%
rename from mapreduce/src/examples/python/compile
rename to hadoop-mapreduce/src/examples/python/compile
diff --git a/mapreduce/src/examples/python/pyAbacus/JyAbacusWCPlugIN.py b/hadoop-mapreduce/src/examples/python/pyAbacus/JyAbacusWCPlugIN.py
similarity index 100%
rename from mapreduce/src/examples/python/pyAbacus/JyAbacusWCPlugIN.py
rename to hadoop-mapreduce/src/examples/python/pyAbacus/JyAbacusWCPlugIN.py
diff --git a/mapreduce/src/examples/python/pyAbacus/JythonAbacus.py b/hadoop-mapreduce/src/examples/python/pyAbacus/JythonAbacus.py
similarity index 100%
rename from mapreduce/src/examples/python/pyAbacus/JythonAbacus.py
rename to hadoop-mapreduce/src/examples/python/pyAbacus/JythonAbacus.py
diff --git a/mapreduce/src/examples/python/pyAbacus/compile b/hadoop-mapreduce/src/examples/python/pyAbacus/compile
similarity index 100%
rename from mapreduce/src/examples/python/pyAbacus/compile
rename to hadoop-mapreduce/src/examples/python/pyAbacus/compile
diff --git a/mapreduce/src/examples/python/pyAbacus/wordcountaggregator.spec b/hadoop-mapreduce/src/examples/python/pyAbacus/wordcountaggregator.spec
similarity index 100%
rename from mapreduce/src/examples/python/pyAbacus/wordcountaggregator.spec
rename to hadoop-mapreduce/src/examples/python/pyAbacus/wordcountaggregator.spec
diff --git a/mapreduce/src/java/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider b/hadoop-mapreduce/src/java/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
similarity index 100%
rename from mapreduce/src/java/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
rename to hadoop-mapreduce/src/java/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
diff --git a/mapreduce/src/java/mapred-default.xml b/hadoop-mapreduce/src/java/mapred-default.xml
similarity index 100%
rename from mapreduce/src/java/mapred-default.xml
rename to hadoop-mapreduce/src/java/mapred-default.xml
diff --git a/mapreduce/src/java/mapred-queues-default.xml b/hadoop-mapreduce/src/java/mapred-queues-default.xml
similarity index 100%
rename from mapreduce/src/java/mapred-queues-default.xml
rename to hadoop-mapreduce/src/java/mapred-queues-default.xml
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ACLsManager.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/ACLsManager.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/ACLsManager.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/ACLsManager.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/AdminOperationsProtocol.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/AdminOperationsProtocol.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/AdminOperationsProtocol.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/AdminOperationsProtocol.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Child.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/Child.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/Child.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/Child.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/CommitTaskAction.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/CommitTaskAction.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/CommitTaskAction.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/CommitTaskAction.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/CompositeTaskTrackerInstrumentation.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/CompositeTaskTrackerInstrumentation.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/CompositeTaskTrackerInstrumentation.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/CompositeTaskTrackerInstrumentation.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/DefaultTaskController.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/DefaultTaskController.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/DefaultTaskController.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/DefaultTaskController.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/HeartbeatResponse.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/HeartbeatResponse.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/HeartbeatResponse.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/HeartbeatResponse.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JSPUtil.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JSPUtil.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JSPUtil.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JSPUtil.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobChangeEvent.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobChangeEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobChangeEvent.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobChangeEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobInProgress.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobInProgress.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobInProgress.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobInProgress.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobInProgressListener.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobInProgressListener.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobInProgressListener.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobInProgressListener.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobQueueClient.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobQueueClient.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobQueueClient.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobQueueClient.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobQueueJobInProgressListener.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobQueueJobInProgressListener.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobQueueJobInProgressListener.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobQueueJobInProgressListener.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobStatusChangeEvent.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobStatusChangeEvent.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobStatusChangeEvent.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobStatusChangeEvent.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobTracker.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTracker.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobTracker.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTracker.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerClientProtocolProvider.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerClientProtocolProvider.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerClientProtocolProvider.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerClientProtocolProvider.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerInstrumentation.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerInstrumentation.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerInstrumentation.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerInstrumentation.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerMetricsInst.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerMetricsInst.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerMetricsInst.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerMetricsInst.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerStatistics.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerStatistics.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerStatistics.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JobTrackerStatistics.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JvmManager.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JvmManager.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/JvmManager.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/JvmManager.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/KillJobAction.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/KillJobAction.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/KillJobAction.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/KillJobAction.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/KillTaskAction.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/KillTaskAction.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/KillTaskAction.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/KillTaskAction.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/LaunchTaskAction.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LaunchTaskAction.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/LaunchTaskAction.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LaunchTaskAction.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/LinuxTaskController.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LinuxTaskController.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/LinuxTaskController.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LinuxTaskController.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/LocalJobRunner.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapReducePolicyProvider.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/MapReducePolicyProvider.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MapReducePolicyProvider.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/MapReducePolicyProvider.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapTaskRunner.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/MapTaskRunner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/MapTaskRunner.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/MapTaskRunner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/NodeHealthCheckerService.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/NodeHealthCheckerService.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/NodeHealthCheckerService.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/NodeHealthCheckerService.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ReduceTaskRunner.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/ReduceTaskRunner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/ReduceTaskRunner.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/ReduceTaskRunner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ReinitTrackerAction.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/ReinitTrackerAction.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/ReinitTrackerAction.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/ReinitTrackerAction.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ResourceEstimator.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/ResourceEstimator.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/ResourceEstimator.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/ResourceEstimator.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskController.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskController.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskController.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskController.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskGraphServlet.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskGraphServlet.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskGraphServlet.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskGraphServlet.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskInProgress.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskInProgress.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskInProgress.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskInProgress.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskLogServlet.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskLogServlet.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskLogServlet.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskLogServlet.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskMemoryManagerThread.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskMemoryManagerThread.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskMemoryManagerThread.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskMemoryManagerThread.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskRunner.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskRunner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskRunner.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskRunner.java
diff --git a/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskScheduler.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskScheduler.java
new file mode 100644
index 0000000..afee66af
--- /dev/null
+++ b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskScheduler.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
+
+/**
+ * Used by a {@link JobTracker} to schedule {@link Task}s on
+ * {@link TaskTracker}s.
+ * <p>
+ * {@link TaskScheduler}s typically use one or more
+ * {@link JobInProgressListener}s to receive notifications about jobs.
+ * <p>
+ * It is the responsibility of the {@link TaskScheduler}
+ * to initialize tasks for a job, by calling {@link JobInProgress#initTasks()}
+ * between the job being added (when
+ * {@link JobInProgressListener#jobAdded(JobInProgress)} is called)
+ * and tasks for that job being assigned (by
+ * {@link #assignTasks(TaskTracker)}).
+ * @see EagerTaskInitializationListener
+ */
+abstract class TaskScheduler implements Configurable {
+
+ protected Configuration conf;
+ protected TaskTrackerManager taskTrackerManager;
+
+ public Configuration getConf() {
+ return conf;
+ }
+
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+
+ public synchronized void setTaskTrackerManager(
+ TaskTrackerManager taskTrackerManager) {
+ this.taskTrackerManager = taskTrackerManager;
+ }
+
+ /**
+ * Lifecycle method to allow the scheduler to start any work in separate
+ * threads.
+ * @throws IOException
+ */
+ public void start() throws IOException {
+ // do nothing
+ }
+
+ /**
+ * Lifecycle method to allow the scheduler to stop any work it is doing.
+ * @throws IOException
+ */
+ public void terminate() throws IOException {
+ // do nothing
+ }
+
+ /**
+ * Returns the tasks we'd like the TaskTracker to execute right now.
+ *
+ * @param taskTracker The TaskTracker for which we're looking for tasks.
+ * @return A list of tasks to run on that TaskTracker, possibly empty.
+ */
+ public abstract List<Task> assignTasks(TaskTracker taskTracker)
+ throws IOException;
+
+ /**
+ * Returns a collection of jobs in an order which is specific to
+ * the particular scheduler.
+   * @param queueName the name of the queue whose jobs are requested
+   * @return the jobs in the queue, in scheduler-specific order
+ */
+ public abstract Collection<JobInProgress> getJobs(String queueName);
+
+ /**
+   * Get the {@link QueueRefresher} for this scheduler. By default, no
+   * {@link QueueRefresher} exists for a scheduler, so this returns null.
+ * Schedulers need to return an instance of {@link QueueRefresher} if they
+ * wish to refresh their queue-configuration when {@link QueueManager}
+ * refreshes its own queue-configuration via an administrator request.
+ *
+   * @return the {@link QueueRefresher} for this scheduler, or null if none
+ */
+ QueueRefresher getQueueRefresher() {
+ return null;
+ }
+}
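
Note: the TaskScheduler contract above is easiest to see in a skeletal
subclass. The sketch below is illustrative only and is not part of this
patch; NoopTaskScheduler is a hypothetical name, and a real scheduler would
draw runnable tasks from the jobs its JobInProgressListeners have reported
(assumes java.util.Collections alongside the imports already in the file):

    class NoopTaskScheduler extends TaskScheduler {
      @Override
      public List<Task> assignTasks(TaskTracker taskTracker)
          throws IOException {
        // A real scheduler would match the tracker's free map/reduce slots
        // against runnable tasks of initialized jobs; returning an empty
        // list simply assigns nothing on this heartbeat.
        return Collections.emptyList();
      }

      @Override
      public Collection<JobInProgress> getJobs(String queueName) {
        // No queues are tracked in this sketch.
        return Collections.emptyList();
      }
    }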
diff --git a/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTracker.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTracker.java
new file mode 100644
index 0000000..1e712be
--- /dev/null
+++ b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTracker.java
@@ -0,0 +1,4233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.StringTokenizer;
+import java.util.TreeMap;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.regex.Pattern;
+
+import javax.crypto.SecretKey;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.io.SecureIOUtils;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.mapred.TaskController.DebugScriptContext;
+import org.apache.hadoop.mapred.TaskController.JobInitializationContext;
+import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext;
+import org.apache.hadoop.mapred.TaskController.TaskControllerPathDeletionContext;
+import org.apache.hadoop.mapred.TaskController.TaskControllerTaskPathDeletionContext;
+import org.apache.hadoop.mapred.TaskController.TaskControllerJobPathDeletionContext;
+import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus;
+import org.apache.hadoop.mapred.pipes.Submitter;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager;
+import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
+import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
+import org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader;
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsException;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
+import org.apache.hadoop.metrics.Updater;
+import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
+import org.apache.hadoop.mapreduce.util.MemoryCalculatorPlugin;
+import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
+import org.apache.hadoop.mapreduce.util.ProcfsBasedProcessTree;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.RunJar;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.mapreduce.util.MRAsyncDiskService;
+
+/*******************************************************
+ * TaskTracker is a process that starts and tracks MR Tasks
+ * in a networked environment. It contacts the JobTracker
+ * for Task assignments and to report results.
+ *
+ *******************************************************/
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class TaskTracker
+ implements MRConstants, TaskUmbilicalProtocol, Runnable, TTConfig {
+ /**
+ * @deprecated
+ */
+ @Deprecated
+ static final String MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY =
+ "mapred.tasktracker.vmem.reserved";
+ /**
+ * @deprecated
+ */
+ @Deprecated
+ static final String MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY =
+ "mapred.tasktracker.pmem.reserved";
+
+
+ static final long WAIT_FOR_DONE = 3 * 1000;
+ private int httpPort;
+
+ static enum State {NORMAL, STALE, INTERRUPTED, DENIED}
+
+ static{
+ ConfigUtil.loadResources();
+ }
+
+ public static final Log LOG =
+ LogFactory.getLog(TaskTracker.class);
+
+ public static final String MR_CLIENTTRACE_FORMAT =
+ "src: %s" + // src IP
+ ", dest: %s" + // dst IP
+ ", maps: %s" + // number of maps
+ ", op: %s" + // operation
+ ", reduceID: %s" + // reduce id
+ ", duration: %s"; // duration
+
+ public static final Log ClientTraceLog =
+ LogFactory.getLog(TaskTracker.class.getName() + ".clienttrace");
+
+ // Job ACLs file is created by TaskTracker under userlogs/$jobid directory for
+ // each job at job localization time. This will be used by TaskLogServlet for
+ // authorizing viewing of task logs of that job
+ static String jobACLsFile = "job-acls.xml";
+
+ volatile boolean running = true;
+
+ private LocalDirAllocator localDirAllocator;
+ String taskTrackerName;
+ String localHostname;
+ InetSocketAddress jobTrackAddr;
+
+ InetSocketAddress taskReportAddress;
+
+ Server taskReportServer = null;
+ InterTrackerProtocol jobClient;
+
+ private TrackerDistributedCacheManager distributedCacheManager;
+
+ // last heartbeat response received
+ short heartbeatResponseId = -1;
+
+ static final String TASK_CLEANUP_SUFFIX = ".cleanup";
+
+ /*
+ * This is the last 'status' report sent by this tracker to the JobTracker.
+ *
+   * If the rpc call succeeds, this 'status' is cleared out by this tracker,
+   * indicating that a 'fresh' status report should be generated; in the
+   * event the rpc call fails for whatever reason, the previous status
+   * report is sent again.
+ */
+ TaskTrackerStatus status = null;
+
+ // The system-directory on HDFS where job files are stored
+ Path systemDirectory = null;
+
+ // The filesystem where job files are stored
+ FileSystem systemFS = null;
+
+ private final HttpServer server;
+
+ volatile boolean shuttingDown = false;
+
+ Map<TaskAttemptID, TaskInProgress> tasks = new HashMap<TaskAttemptID, TaskInProgress>();
+ /**
+ * Map from taskId -> TaskInProgress.
+ */
+ Map<TaskAttemptID, TaskInProgress> runningTasks = null;
+ Map<JobID, RunningJob> runningJobs = new TreeMap<JobID, RunningJob>();
+ private final JobTokenSecretManager jobTokenSecretManager
+ = new JobTokenSecretManager();
+
+ volatile int mapTotal = 0;
+ volatile int reduceTotal = 0;
+ boolean justStarted = true;
+ boolean justInited = true;
+ // Mark reduce tasks that are shuffling to rollback their events index
+ Set<TaskAttemptID> shouldReset = new HashSet<TaskAttemptID>();
+
+ //dir -> DF
+ Map<String, DF> localDirsDf = new HashMap<String, DF>();
+ long minSpaceStart = 0;
+ //must have this much space free to start new tasks
+ boolean acceptNewTasks = true;
+ long minSpaceKill = 0;
+ //if we run under this limit, kill one task
+ //and make sure we never receive any new jobs
+ //until all the old tasks have been cleaned up.
+ //this is if a machine is so full it's only good
+ //for serving map output to the other nodes
+
+ static Random r = new Random();
+ public static final String SUBDIR = "taskTracker";
+ static final String DISTCACHEDIR = "distcache";
+ static final String JOBCACHE = "jobcache";
+ static final String OUTPUT = "output";
+ private static final String JARSDIR = "jars";
+ static final String LOCAL_SPLIT_FILE = "split.dta";
+ static final String LOCAL_SPLIT_META_FILE = "split.info";
+ static final String JOBFILE = "job.xml";
+ static final String JOB_TOKEN_FILE="jobToken"; //localized file
+
+ static final String JOB_LOCAL_DIR = MRJobConfig.JOB_LOCAL_DIR;
+
+ private JobConf fConf;
+ private FileSystem localFs;
+
+ private Localizer localizer;
+
+ private int maxMapSlots;
+ private int maxReduceSlots;
+ private int failures;
+
+ private ACLsManager aclsManager;
+
+ // Performance-related config knob to send an out-of-band heartbeat
+ // on task completion
+ private volatile boolean oobHeartbeatOnTaskCompletion;
+
+ // Track number of completed tasks to send an out-of-band heartbeat
+ private IntWritable finishedCount = new IntWritable(0);
+
+ private MapEventsFetcherThread mapEventsFetcher;
+ int workerThreads;
+ CleanupQueue directoryCleanupThread;
+ private volatile JvmManager jvmManager;
+ UserLogCleaner taskLogCleanupThread;
+ private TaskMemoryManagerThread taskMemoryManager;
+ private boolean taskMemoryManagerEnabled = true;
+ private long totalVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
+ private long totalPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
+ private long mapSlotMemorySizeOnTT = JobConf.DISABLED_MEMORY_LIMIT;
+ private long reduceSlotSizeMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
+ private long totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT;
+ private long reservedPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
+ private ResourceCalculatorPlugin resourceCalculatorPlugin = null;
+
+ /**
+ * the minimum interval between jobtracker polls
+ */
+ private volatile int heartbeatInterval =
+ JTConfig.JT_HEARTBEAT_INTERVAL_MIN_DEFAULT;
+ /**
+ * Number of maptask completion events locations to poll for at one time
+ */
+ private int probe_sample_size = 500;
+
+ private IndexCache indexCache;
+
+ private MRAsyncDiskService asyncDiskService;
+
+ MRAsyncDiskService getAsyncDiskService() {
+ return asyncDiskService;
+ }
+
+ void setAsyncDiskService(MRAsyncDiskService asyncDiskService) {
+ this.asyncDiskService = asyncDiskService;
+ }
+
+ /**
+ * Handle to the specific instance of the {@link TaskController} class
+ */
+ private TaskController taskController;
+
+ /**
+ * Handle to the specific instance of the {@link NodeHealthCheckerService}
+ */
+ private NodeHealthCheckerService healthChecker;
+
+ /*
+   * A list of commitTaskActions for which a commit response has been received
+ */
+ private List<TaskAttemptID> commitResponses =
+ Collections.synchronizedList(new ArrayList<TaskAttemptID>());
+
+ private ShuffleServerMetrics shuffleServerMetrics;
+  /** This class contains the methods used to report the metrics specific to
+   * the shuffle. The TaskTracker is actually a server for
+ * the shuffle and hence the name ShuffleServerMetrics.
+ */
+ class ShuffleServerMetrics implements Updater {
+ private MetricsRecord shuffleMetricsRecord = null;
+ private int serverHandlerBusy = 0;
+ private long outputBytes = 0;
+ private int failedOutputs = 0;
+ private int successOutputs = 0;
+ private int exceptionsCaught = 0;
+ ShuffleServerMetrics(JobConf conf) {
+ MetricsContext context = MetricsUtil.getContext("mapred");
+ shuffleMetricsRecord =
+ MetricsUtil.createRecord(context, "shuffleOutput");
+ this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
+ context.registerUpdater(this);
+ }
+ synchronized void serverHandlerBusy() {
+ ++serverHandlerBusy;
+ }
+ synchronized void serverHandlerFree() {
+ --serverHandlerBusy;
+ }
+ synchronized void outputBytes(long bytes) {
+ outputBytes += bytes;
+ }
+ synchronized void failedOutput() {
+ ++failedOutputs;
+ }
+ synchronized void successOutput() {
+ ++successOutputs;
+ }
+ synchronized void exceptionsCaught() {
+ ++exceptionsCaught;
+ }
+ public void doUpdates(MetricsContext unused) {
+ synchronized (this) {
+ if (workerThreads != 0) {
+ shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent",
+ 100*((float)serverHandlerBusy/workerThreads));
+ } else {
+ shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 0);
+ }
+ shuffleMetricsRecord.incrMetric("shuffle_output_bytes",
+ outputBytes);
+ shuffleMetricsRecord.incrMetric("shuffle_failed_outputs",
+ failedOutputs);
+ shuffleMetricsRecord.incrMetric("shuffle_success_outputs",
+ successOutputs);
+ shuffleMetricsRecord.incrMetric("shuffle_exceptions_caught",
+ exceptionsCaught);
+ outputBytes = 0;
+ failedOutputs = 0;
+ successOutputs = 0;
+ exceptionsCaught = 0;
+ }
+ shuffleMetricsRecord.update();
+ }
+ }
+
+
+ private TaskTrackerInstrumentation myInstrumentation = null;
+
+ public TaskTrackerInstrumentation getTaskTrackerInstrumentation() {
+ return myInstrumentation;
+ }
+
+ // Currently used only in tests
+ void setTaskTrackerInstrumentation(
+ TaskTrackerInstrumentation trackerInstrumentation) {
+ myInstrumentation = trackerInstrumentation;
+ }
+
+ /**
+ * A list of tips that should be cleaned up.
+ */
+ private BlockingQueue<TaskTrackerAction> tasksToCleanup =
+ new LinkedBlockingQueue<TaskTrackerAction>();
+
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ return ProtocolSignature.getProtocolSignature(
+ this, protocol, clientVersion, clientMethodsHash);
+ }
+
+ /**
+ * A daemon-thread that pulls tips off the list of things to cleanup.
+ */
+ private Thread taskCleanupThread =
+ new Thread(new Runnable() {
+ public void run() {
+ while (true) {
+ try {
+ TaskTrackerAction action = tasksToCleanup.take();
+ if (action instanceof KillJobAction) {
+ purgeJob((KillJobAction) action);
+ } else if (action instanceof KillTaskAction) {
+ processKillTaskAction((KillTaskAction) action);
+ } else {
+ LOG.error("Non-delete action given to cleanup thread: "
+ + action);
+ }
+ } catch (Throwable except) {
+ LOG.warn(StringUtils.stringifyException(except));
+ }
+ }
+ }
+ }, "taskCleanup");
+
+ void processKillTaskAction(KillTaskAction killAction) throws IOException {
+ TaskInProgress tip;
+ synchronized (TaskTracker.this) {
+ tip = tasks.get(killAction.getTaskID());
+ }
+ LOG.info("Received KillTaskAction for task: " +
+ killAction.getTaskID());
+ purgeTask(tip, false);
+ }
+
+ public TaskController getTaskController() {
+ return taskController;
+ }
+
+ // Currently this is used only by tests
+ void setTaskController(TaskController t) {
+ taskController = t;
+ }
+
+ private RunningJob addTaskToJob(JobID jobId,
+ TaskInProgress tip) {
+ synchronized (runningJobs) {
+ RunningJob rJob = null;
+ if (!runningJobs.containsKey(jobId)) {
+ rJob = new RunningJob(jobId);
+ rJob.localized = false;
+ rJob.tasks = new HashSet<TaskInProgress>();
+ runningJobs.put(jobId, rJob);
+ } else {
+ rJob = runningJobs.get(jobId);
+ }
+ synchronized (rJob) {
+ rJob.tasks.add(tip);
+ }
+ runningJobs.notify(); //notify the fetcher thread
+ return rJob;
+ }
+ }
+
+ private void removeTaskFromJob(JobID jobId, TaskInProgress tip) {
+ synchronized (runningJobs) {
+ RunningJob rjob = runningJobs.get(jobId);
+ if (rjob == null) {
+ LOG.warn("Unknown job " + jobId + " being deleted.");
+ } else {
+ synchronized (rjob) {
+ rjob.tasks.remove(tip);
+ }
+ }
+ }
+ }
+
+ JobTokenSecretManager getJobTokenSecretManager() {
+ return jobTokenSecretManager;
+ }
+
+ RunningJob getRunningJob(JobID jobId) {
+ return runningJobs.get(jobId);
+ }
+
+ Localizer getLocalizer() {
+ return localizer;
+ }
+
+ void setLocalizer(Localizer l) {
+ localizer = l;
+ }
+
+ public static String getUserDir(String user) {
+ return TaskTracker.SUBDIR + Path.SEPARATOR + user;
+ }
+
+ public static String getPrivateDistributedCacheDir(String user) {
+ return getUserDir(user) + Path.SEPARATOR + TaskTracker.DISTCACHEDIR;
+ }
+
+ public static String getPublicDistributedCacheDir() {
+ return TaskTracker.SUBDIR + Path.SEPARATOR + TaskTracker.DISTCACHEDIR;
+ }
+
+ public static String getJobCacheSubdir(String user) {
+ return getUserDir(user) + Path.SEPARATOR + TaskTracker.JOBCACHE;
+ }
+
+ public static String getLocalJobDir(String user, String jobid) {
+ return getJobCacheSubdir(user) + Path.SEPARATOR + jobid;
+ }
+
+ static String getLocalJobConfFile(String user, String jobid) {
+ return getLocalJobDir(user, jobid) + Path.SEPARATOR + TaskTracker.JOBFILE;
+ }
+
+ static String getLocalJobTokenFile(String user, String jobid) {
+ return getLocalJobDir(user, jobid) + Path.SEPARATOR + TaskTracker.JOB_TOKEN_FILE;
+ }
+
+
+ static String getTaskConfFile(String user, String jobid, String taskid,
+ boolean isCleanupAttempt) {
+ return getLocalTaskDir(user, jobid, taskid, isCleanupAttempt)
+ + Path.SEPARATOR + TaskTracker.JOBFILE;
+ }
+
+ static String getJobJarsDir(String user, String jobid) {
+ return getLocalJobDir(user, jobid) + Path.SEPARATOR + TaskTracker.JARSDIR;
+ }
+
+ static String getJobJarFile(String user, String jobid) {
+ return getJobJarsDir(user, jobid) + Path.SEPARATOR + "job.jar";
+ }
+
+ static String getJobWorkDir(String user, String jobid) {
+ return getLocalJobDir(user, jobid) + Path.SEPARATOR + MRConstants.WORKDIR;
+ }
+
+ static String getLocalSplitMetaFile(String user, String jobid, String taskid){
+ return TaskTracker.getLocalTaskDir(user, jobid, taskid) + Path.SEPARATOR
+ + TaskTracker.LOCAL_SPLIT_META_FILE;
+ }
+
+ static String getLocalSplitFile(String user, String jobid, String taskid) {
+ return TaskTracker.getLocalTaskDir(user, jobid, taskid) + Path.SEPARATOR
+ + TaskTracker.LOCAL_SPLIT_FILE;
+ }
+
+ static String getIntermediateOutputDir(String user, String jobid,
+ String taskid) {
+ return getLocalTaskDir(user, jobid, taskid) + Path.SEPARATOR
+ + TaskTracker.OUTPUT;
+ }
+
+ static String getLocalTaskDir(String user, String jobid, String taskid) {
+ return getLocalTaskDir(user, jobid, taskid, false);
+ }
+
+ public static String getLocalTaskDir(String user, String jobid, String taskid,
+ boolean isCleanupAttempt) {
+ String taskDir = getLocalJobDir(user, jobid) + Path.SEPARATOR + taskid;
+ if (isCleanupAttempt) {
+ taskDir = taskDir + TASK_CLEANUP_SUFFIX;
+ }
+ return taskDir;
+ }
+
+ static String getTaskWorkDir(String user, String jobid, String taskid,
+ boolean isCleanupAttempt) {
+ String dir = getLocalTaskDir(user, jobid, taskid, isCleanupAttempt);
+ return dir + Path.SEPARATOR + MRConstants.WORKDIR;
+ }
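+
+  // Illustrative on-disk layout produced by the path helpers above, assuming
+  // MRConstants.WORKDIR is "work" and using hypothetical names (user "alice",
+  // job "job_1", task attempt "attempt_1"):
+  //   taskTracker/alice/distcache/                  <- private dist. cache
+  //   taskTracker/alice/jobcache/job_1/job.xml
+  //   taskTracker/alice/jobcache/job_1/jobToken
+  //   taskTracker/alice/jobcache/job_1/jars/job.jar
+  //   taskTracker/alice/jobcache/job_1/work/
+  //   taskTracker/alice/jobcache/job_1/attempt_1/job.xml
+  //   taskTracker/alice/jobcache/job_1/attempt_1/output/
+  //   taskTracker/alice/jobcache/job_1/attempt_1/work/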
+
+ String getPid(TaskAttemptID tid) {
+ TaskInProgress tip = tasks.get(tid);
+ if (tip != null) {
+ return jvmManager.getPid(tip.getTaskRunner());
+ }
+ return null;
+ }
+
+ public long getProtocolVersion(String protocol,
+ long clientVersion) throws IOException {
+ if (protocol.equals(TaskUmbilicalProtocol.class.getName())) {
+ return TaskUmbilicalProtocol.versionID;
+ } else {
+ throw new IOException("Unknown protocol for task tracker: " +
+ protocol);
+ }
+ }
+
+
+ int getHttpPort() {
+ return httpPort;
+ }
+
+ /**
+ * Do the real constructor work here. It's in a separate method
+ * so we can call it again and "recycle" the object after calling
+ * close().
+ */
+ synchronized void initialize() throws IOException, InterruptedException {
+
+ LOG.info("Starting tasktracker with owner as " +
+ aclsManager.getMROwner().getShortUserName());
+
+ localFs = FileSystem.getLocal(fConf);
+ // use configured nameserver & interface to get local hostname
+ if (fConf.get(TT_HOST_NAME) != null) {
+ this.localHostname = fConf.get(TT_HOST_NAME);
+ }
+ if (localHostname == null) {
+ this.localHostname =
+ DNS.getDefaultHost
+ (fConf.get(TT_DNS_INTERFACE,"default"),
+ fConf.get(TT_DNS_NAMESERVER,"default"));
+ }
+
+ // Check local disk, start async disk service, and clean up all
+ // local directories.
+ checkLocalDirs(this.fConf.getLocalDirs());
+ setAsyncDiskService(new MRAsyncDiskService(fConf));
+ getAsyncDiskService().cleanupAllVolumes();
+
+ // Clear out state tables
+ this.tasks.clear();
+ this.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
+ this.runningJobs = new TreeMap<JobID, RunningJob>();
+ this.mapTotal = 0;
+ this.reduceTotal = 0;
+ this.acceptNewTasks = true;
+ this.status = null;
+
+ this.minSpaceStart = this.fConf.getLong(TT_LOCAL_DIR_MINSPACE_START, 0L);
+ this.minSpaceKill = this.fConf.getLong(TT_LOCAL_DIR_MINSPACE_KILL, 0L);
+ //tweak the probe sample size (make it a function of numCopiers)
+ probe_sample_size =
+ this.fConf.getInt(TT_MAX_TASK_COMPLETION_EVENTS_TO_POLL, 500);
+
+ // Set up TaskTracker instrumentation
+ this.myInstrumentation = createInstrumentation(this, fConf);
+
+ // bind address
+ InetSocketAddress socAddr = NetUtils.createSocketAddr(
+ fConf.get(TT_REPORT_ADDRESS, "127.0.0.1:0"));
+ String bindAddress = socAddr.getHostName();
+ int tmpPort = socAddr.getPort();
+
+ this.jvmManager = new JvmManager(this);
+
+ // RPC initialization
+ int max = maxMapSlots > maxReduceSlots ?
+ maxMapSlots : maxReduceSlots;
+ //set the num handlers to max*2 since canCommit may wait for the duration
+ //of a heartbeat RPC
+ this.taskReportServer = RPC.getServer(this.getClass(), this, bindAddress,
+ tmpPort, 2 * max, false, this.fConf, this.jobTokenSecretManager);
+
+ // Set service-level authorization security policy
+ if (this.fConf.getBoolean(
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
+ PolicyProvider policyProvider =
+ (PolicyProvider)(ReflectionUtils.newInstance(
+ this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
+ MapReducePolicyProvider.class, PolicyProvider.class),
+ this.fConf));
+ this.taskReportServer.refreshServiceAcl(fConf, policyProvider);
+ }
+
+ this.taskReportServer.start();
+
+ // get the assigned address
+ this.taskReportAddress = taskReportServer.getListenerAddress();
+ this.fConf.set(TT_REPORT_ADDRESS,
+ taskReportAddress.getHostName() + ":" + taskReportAddress.getPort());
+ LOG.info("TaskTracker up at: " + this.taskReportAddress);
+
+ this.taskTrackerName = "tracker_" + localHostname + ":" + taskReportAddress;
+ LOG.info("Starting tracker " + taskTrackerName);
+
+ Class<? extends TaskController> taskControllerClass = fConf.getClass(
+ TT_TASK_CONTROLLER, DefaultTaskController.class, TaskController.class);
+ taskController = (TaskController) ReflectionUtils.newInstance(
+ taskControllerClass, fConf);
+
+
+ // setup and create jobcache directory with appropriate permissions
+ taskController.setup();
+
+ // Initialize DistributedCache
+ this.distributedCacheManager =
+ new TrackerDistributedCacheManager(this.fConf, taskController,
+ asyncDiskService);
+ this.distributedCacheManager.startCleanupThread();
+
+ this.jobClient = (InterTrackerProtocol)
+ UserGroupInformation.getLoginUser().doAs(
+ new PrivilegedExceptionAction<Object>() {
+ public Object run() throws IOException {
+ return RPC.waitForProxy(InterTrackerProtocol.class,
+ InterTrackerProtocol.versionID,
+ jobTrackAddr, fConf);
+ }
+ });
+ this.justInited = true;
+ this.running = true;
+ // start the thread that will fetch map task completion events
+ this.mapEventsFetcher = new MapEventsFetcherThread();
+ mapEventsFetcher.setDaemon(true);
+ mapEventsFetcher.setName(
+ "Map-events fetcher for all reduce tasks " + "on " +
+ taskTrackerName);
+ mapEventsFetcher.start();
+
+ Class<? extends ResourceCalculatorPlugin> clazz =
+ fConf.getClass(TT_RESOURCE_CALCULATOR_PLUGIN,
+ null, ResourceCalculatorPlugin.class);
+ resourceCalculatorPlugin = ResourceCalculatorPlugin
+ .getResourceCalculatorPlugin(clazz, fConf);
+ LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculatorPlugin);
+ initializeMemoryManagement();
+
+ setIndexCache(new IndexCache(this.fConf));
+
+ //clear old user logs
+ taskLogCleanupThread.clearOldUserLogs(this.fConf);
+
+ mapLauncher = new TaskLauncher(TaskType.MAP, maxMapSlots);
+ reduceLauncher = new TaskLauncher(TaskType.REDUCE, maxReduceSlots);
+ mapLauncher.start();
+ reduceLauncher.start();
+
+ // create a localizer instance
+ setLocalizer(new Localizer(localFs, fConf.getLocalDirs(), taskController));
+
+ //Start up node health checker service.
+ if (shouldStartHealthMonitor(this.fConf)) {
+ startHealthMonitor(this.fConf);
+ }
+
+ oobHeartbeatOnTaskCompletion =
+ fConf.getBoolean(TT_OUTOFBAND_HEARBEAT, false);
+ }
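+
+  // initialize() in brief: resolve the local hostname, scrub local dirs via
+  // MRAsyncDiskService, reset in-memory state, start the task-report RPC
+  // server, the task controller and the distributed-cache manager, connect
+  // to the JobTracker, then start the map-events fetcher, memory management,
+  // the task launchers, the localizer and (if configured) the node health
+  // monitor.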
+
+ /**
+   * Are ACLs for authorization checks enabled on the MR cluster?
+ */
+ boolean areACLsEnabled() {
+ return fConf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
+ }
+
+ public static Class<?>[] getInstrumentationClasses(Configuration conf) {
+ return conf.getClasses(TT_INSTRUMENTATION, TaskTrackerMetricsInst.class);
+ }
+
+ public static void setInstrumentationClass(
+ Configuration conf, Class<? extends TaskTrackerInstrumentation> t) {
+ conf.setClass(TT_INSTRUMENTATION,
+ t, TaskTrackerInstrumentation.class);
+ }
+
+ public static TaskTrackerInstrumentation createInstrumentation(
+ TaskTracker tt, Configuration conf) {
+ try {
+ Class<?>[] instrumentationClasses = getInstrumentationClasses(conf);
+ if (instrumentationClasses.length == 0) {
+ LOG.error("Empty string given for " + TT_INSTRUMENTATION +
+ " property -- will use default instrumentation class instead");
+ return new TaskTrackerMetricsInst(tt);
+ } else if (instrumentationClasses.length == 1) {
+ // Just one instrumentation class given; create it directly
+ Class<?> cls = instrumentationClasses[0];
+ java.lang.reflect.Constructor<?> c =
+ cls.getConstructor(new Class[] {TaskTracker.class} );
+ return (TaskTrackerInstrumentation) c.newInstance(tt);
+ } else {
+ // Multiple instrumentation classes given; use a composite object
+ List<TaskTrackerInstrumentation> instrumentations =
+ new ArrayList<TaskTrackerInstrumentation>();
+ for (Class<?> cls: instrumentationClasses) {
+ java.lang.reflect.Constructor<?> c =
+ cls.getConstructor(new Class[] {TaskTracker.class} );
+ TaskTrackerInstrumentation inst =
+ (TaskTrackerInstrumentation) c.newInstance(tt);
+ instrumentations.add(inst);
+ }
+ return new CompositeTaskTrackerInstrumentation(tt, instrumentations);
+ }
+ } catch(Exception e) {
+ // Reflection can throw lots of exceptions -- handle them all by
+ // falling back on the default.
+ LOG.error("Failed to initialize TaskTracker metrics", e);
+ return new TaskTrackerMetricsInst(tt);
+ }
+ }
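+
+  // Note on the factory above: any reflection failure falls back to the
+  // default TaskTrackerMetricsInst, and multiple configured classes are
+  // wrapped in a CompositeTaskTrackerInstrumentation so that each one
+  // receives every instrumentation callback.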
+
+ /**
+ * Removes all contents of temporary storage. Called upon
+   * startup, to remove any leftovers from a previous run.
+ *
+ * Use MRAsyncDiskService.moveAndDeleteAllVolumes instead.
+ * @see org.apache.hadoop.mapreduce.util.MRAsyncDiskService#cleanupAllVolumes()
+ */
+ @Deprecated
+ public void cleanupStorage() throws IOException {
+ this.fConf.deleteLocalFiles();
+ }
+
+  // Object on which the MapEventsFetcherThread waits.
+ private Object waitingOn = new Object();
+
+ private class MapEventsFetcherThread extends Thread {
+
+ private List <FetchStatus> reducesInShuffle() {
+ List <FetchStatus> fList = new ArrayList<FetchStatus>();
+ for (Map.Entry <JobID, RunningJob> item : runningJobs.entrySet()) {
+ RunningJob rjob = item.getValue();
+ JobID jobId = item.getKey();
+ FetchStatus f;
+ synchronized (rjob) {
+ f = rjob.getFetchStatus();
+ for (TaskInProgress tip : rjob.tasks) {
+ Task task = tip.getTask();
+ if (!task.isMapTask()) {
+ if (((ReduceTask)task).getPhase() ==
+ TaskStatus.Phase.SHUFFLE) {
+ if (rjob.getFetchStatus() == null) {
+ //this is a new job; we start fetching its map events
+ f = new FetchStatus(jobId,
+ ((ReduceTask)task).getNumMaps());
+ rjob.setFetchStatus(f);
+ }
+ f = rjob.getFetchStatus();
+ fList.add(f);
+ break; //no need to check any more tasks belonging to this
+ }
+ }
+ }
+ }
+ }
+      //at this point, we know for which of the running jobs we need
+      //to query the jobtracker for map outputs (actually map events).
+ return fList;
+ }
+
+ @Override
+ public void run() {
+ LOG.info("Starting thread: " + this.getName());
+
+ while (running) {
+ try {
+ List <FetchStatus> fList = null;
+ synchronized (runningJobs) {
+ while (((fList = reducesInShuffle()).size()) == 0) {
+ try {
+ runningJobs.wait();
+ } catch (InterruptedException e) {
+ LOG.info("Shutting down: " + this.getName());
+ return;
+ }
+ }
+ }
+ // now fetch all the map task events for all the reduce tasks
+ // possibly belonging to different jobs
+ boolean fetchAgain = false; //flag signifying whether we want to fetch
+ //immediately again.
+ for (FetchStatus f : fList) {
+ long currentTime = System.currentTimeMillis();
+ try {
+ //the method below will return true when we have not
+ //fetched all available events yet
+ if (f.fetchMapCompletionEvents(currentTime)) {
+ fetchAgain = true;
+ }
+ } catch (Exception e) {
+ LOG.warn(
+ "Ignoring exception that fetch for map completion" +
+ " events threw for " + f.jobId + " threw: " +
+ StringUtils.stringifyException(e));
+ }
+ if (!running) {
+ break;
+ }
+ }
+ synchronized (waitingOn) {
+ try {
+ if (!fetchAgain) {
+ waitingOn.wait(heartbeatInterval);
+ }
+ } catch (InterruptedException ie) {
+ LOG.info("Shutting down: " + this.getName());
+ return;
+ }
+ }
+ } catch (Exception e) {
+ LOG.info("Ignoring exception " + e.getMessage());
+ }
+ }
+ }
+ }
+
+ private class FetchStatus {
+    /** The next event ID that we will start querying the JobTracker from */
+ private IntWritable fromEventId;
+ /** This is the cache of map events for a given job */
+ private List<TaskCompletionEvent> allMapEvents;
+    /** The jobid this FetchStatus object is for */
+ private JobID jobId;
+ private long lastFetchTime;
+ private boolean fetchAgain;
+
+ public FetchStatus(JobID jobId, int numMaps) {
+ this.fromEventId = new IntWritable(0);
+ this.jobId = jobId;
+ this.allMapEvents = new ArrayList<TaskCompletionEvent>(numMaps);
+ }
+
+ /**
+ * Reset the events obtained so far.
+ */
+ public void reset() {
+ // Note that the sync is first on fromEventId and then on allMapEvents
+ synchronized (fromEventId) {
+ synchronized (allMapEvents) {
+ fromEventId.set(0); // set the new index for TCE
+ allMapEvents.clear();
+ }
+ }
+ }
+
+ public TaskCompletionEvent[] getMapEvents(int fromId, int max) {
+
+ TaskCompletionEvent[] mapEvents =
+ TaskCompletionEvent.EMPTY_ARRAY;
+ boolean notifyFetcher = false;
+ synchronized (allMapEvents) {
+ if (allMapEvents.size() > fromId) {
+ int actualMax = Math.min(max, (allMapEvents.size() - fromId));
+ List <TaskCompletionEvent> eventSublist =
+ allMapEvents.subList(fromId, actualMax + fromId);
+ mapEvents = eventSublist.toArray(mapEvents);
+ } else {
+ // Notify Fetcher thread.
+ notifyFetcher = true;
+ }
+ }
+ if (notifyFetcher) {
+ synchronized (waitingOn) {
+ waitingOn.notify();
+ }
+ }
+ return mapEvents;
+ }
+
+ public boolean fetchMapCompletionEvents(long currTime) throws IOException {
+ if (!fetchAgain && (currTime - lastFetchTime) < heartbeatInterval) {
+ return false;
+ }
+ int currFromEventId = 0;
+ synchronized (fromEventId) {
+ currFromEventId = fromEventId.get();
+ List <TaskCompletionEvent> recentMapEvents =
+ queryJobTracker(fromEventId, jobId, jobClient);
+ synchronized (allMapEvents) {
+ allMapEvents.addAll(recentMapEvents);
+ }
+ lastFetchTime = currTime;
+ if (fromEventId.get() - currFromEventId >= probe_sample_size) {
+ //return true when we have fetched the full payload, indicating
+ //that we should fetch again immediately (there might be more to
+          //fetch).
+ fetchAgain = true;
+ return true;
+ }
+ }
+ fetchAgain = false;
+ return false;
+ }
+ }
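+
+  // How FetchStatus and MapEventsFetcherThread cooperate: the fetcher polls
+  // the JobTracker for map-completion events and caches them in
+  // allMapEvents, while getMapEvents() serves reduce tasks from that cache
+  // and nudges the fetcher through waitingOn when a reducer asks for events
+  // that have not arrived yet.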
+
+ private static LocalDirAllocator lDirAlloc =
+ new LocalDirAllocator(MRConfig.LOCAL_DIR);
+
+  // initialize the job directory
+ RunningJob localizeJob(TaskInProgress tip
+ ) throws IOException, InterruptedException {
+ Task t = tip.getTask();
+ JobID jobId = t.getJobID();
+ RunningJob rjob = addTaskToJob(jobId, tip);
+
+ // Initialize the user directories if needed.
+ getLocalizer().initializeUserDirs(t.getUser());
+
+ synchronized (rjob) {
+ if (!rjob.localized) {
+
+ JobConf localJobConf = localizeJobFiles(t, rjob);
+ // initialize job log directory
+ initializeJobLogDir(jobId, localJobConf);
+
+ // Now initialize the job via task-controller so as to set
+ // ownership/permissions of jars, job-work-dir. Note that initializeJob
+ // should be the last call after every other directory/file to be
+ // directly under the job directory is created.
+ JobInitializationContext context = new JobInitializationContext();
+ context.jobid = jobId;
+ context.user = t.getUser();
+ context.workDir = new File(localJobConf.get(JOB_LOCAL_DIR));
+ taskController.initializeJob(context);
+
+ rjob.jobConf = localJobConf;
+ rjob.keepJobFiles = ((localJobConf.getKeepTaskFilesPattern() != null) ||
+ localJobConf.getKeepFailedTaskFiles());
+ rjob.localized = true;
+ }
+ }
+ return rjob;
+ }
+
+ private FileSystem getFS(final Path filePath, JobID jobId,
+ final Configuration conf) throws IOException, InterruptedException {
+ RunningJob rJob = runningJobs.get(jobId);
+ FileSystem userFs =
+ rJob.ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ public FileSystem run() throws IOException {
+ return filePath.getFileSystem(conf);
+ }});
+ return userFs;
+ }
+
+ /**
+ * Localize the job on this tasktracker. Specifically
+ * <ul>
+ * <li>Cleanup and create job directories on all disks</li>
+ * <li>Download the job config file job.xml from the FS</li>
+ * <li>Create the job work directory and set {@link TaskTracker#JOB_LOCAL_DIR}
+   * in the configuration.</li>
+ * <li>Download the job jar file job.jar from the FS, unjar it and set jar
+ * file in the configuration.</li>
+ * </ul>
+ *
+   * @param t task whose job has to be localized on this TT
+   * @param rjob the in-memory RunningJob record for that job
+ * @return the modified job configuration to be used for all the tasks of this
+ * job as a starting point.
+ * @throws IOException
+ */
+ JobConf localizeJobFiles(Task t, RunningJob rjob)
+ throws IOException, InterruptedException {
+ JobID jobId = t.getJobID();
+ String userName = t.getUser();
+
+ // Initialize the job directories
+ FileSystem localFs = FileSystem.getLocal(fConf);
+ getLocalizer().initializeJobDirs(userName, jobId);
+ // save local copy of JobToken file
+ String localJobTokenFile = localizeJobTokenFile(t.getUser(), jobId);
+ rjob.ugi = UserGroupInformation.createRemoteUser(t.getUser());
+
+ Credentials ts = TokenCache.loadTokens(localJobTokenFile, fConf);
+ Token<JobTokenIdentifier> jt = TokenCache.getJobToken(ts);
+ if (jt != null) { //could be null in the case of some unit tests
+ getJobTokenSecretManager().addTokenForJob(jobId.toString(), jt);
+ }
+ for (Token<? extends TokenIdentifier> token : ts.getAllTokens()) {
+ rjob.ugi.addToken(token);
+ }
+ // Download the job.xml for this job from the system FS
+ Path localJobFile =
+ localizeJobConfFile(new Path(t.getJobFile()), userName, jobId);
+
+ JobConf localJobConf = new JobConf(localJobFile);
+ //WE WILL TRUST THE USERNAME THAT WE GOT FROM THE JOBTRACKER
+ //AS PART OF THE TASK OBJECT
+ localJobConf.setUser(userName);
+
+ // set the location of the token file into jobConf to transfer
+ // the name to TaskRunner
+ localJobConf.set(TokenCache.JOB_TOKENS_FILENAME,
+ localJobTokenFile);
+
+
+ // create the 'job-work' directory: job-specific shared directory for use as
+ // scratch space by all tasks of the same job running on this TaskTracker.
+ Path workDir =
+ lDirAlloc.getLocalPathForWrite(getJobWorkDir(userName, jobId
+ .toString()), fConf);
+ if (!localFs.mkdirs(workDir)) {
+ throw new IOException("Mkdirs failed to create "
+ + workDir.toString());
+ }
+ System.setProperty(JOB_LOCAL_DIR, workDir.toUri().getPath());
+ localJobConf.set(JOB_LOCAL_DIR, workDir.toUri().getPath());
+ // Download the job.jar for this job from the system FS
+ localizeJobJarFile(userName, jobId, localFs, localJobConf);
+
+ return localJobConf;
+ }
+
+ // Create job userlog dir.
+ // Create job acls file in job log dir, if needed.
+ void initializeJobLogDir(JobID jobId, JobConf localJobConf)
+ throws IOException {
+ // remove it from tasklog cleanup thread first,
+ // it might be added there because of tasktracker reinit or restart
+ taskLogCleanupThread.unmarkJobFromLogDeletion(jobId);
+ localizer.initializeJobLogDir(jobId);
+
+ if (areACLsEnabled()) {
+ // Create job-acls.xml file in job userlog dir and write the needed
+ // info for authorization of users for viewing task logs of this job.
+ writeJobACLs(localJobConf, TaskLog.getJobDir(jobId));
+ }
+ }
+
+ /**
+ * Creates job-acls.xml under the given directory logDir and writes
+ * job-view-acl, queue-admins-acl, jobOwner name and queue name into this
+ * file.
+ * queue name is the queue to which the job was submitted to.
+ * queue-admins-acl is the queue admins ACL of the queue to which this
+ * job was submitted to.
+ * @param conf job configuration
+ * @param logDir job userlog dir
+ * @throws IOException
+ */
+ private static void writeJobACLs(JobConf conf, File logDir)
+ throws IOException {
+ File aclFile = new File(logDir, jobACLsFile);
+ JobConf aclConf = new JobConf(false);
+
+ // set the job view acl in aclConf
+ String jobViewACL = conf.get(MRJobConfig.JOB_ACL_VIEW_JOB, " ");
+ aclConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, jobViewACL);
+
+ // set the job queue name in aclConf
+ String queue = conf.getQueueName();
+ aclConf.setQueueName(queue);
+
+ // set the queue admins acl in aclConf
+ String qACLName = toFullPropertyName(queue,
+ QueueACL.ADMINISTER_JOBS.getAclName());
+ String queueAdminsACL = conf.get(qACLName, " ");
+ aclConf.set(qACLName, queueAdminsACL);
+
+ // set jobOwner as user.name in aclConf
+ String jobOwner = conf.getUser();
+ aclConf.set("user.name", jobOwner);
+
+ FileOutputStream out;
+ try {
+ out = SecureIOUtils.createForWrite(aclFile, 0600);
+ } catch (SecureIOUtils.AlreadyExistsException aee) {
+ LOG.warn("Job ACL file already exists at " + aclFile, aee);
+ return;
+ }
+ try {
+ aclConf.writeXml(out);
+ } finally {
+ out.close();
+ }
+ }
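+
+  // Sketch of what writeJobACLs records in job-acls.xml (keys come from the
+  // constants used above; the values here are hypothetical):
+  //   MRJobConfig.JOB_ACL_VIEW_JOB                   -> "alice,bob"
+  //   toFullPropertyName(queue, ADMINISTER_JOBS acl) -> "mr-admins"
+  //   "user.name"                                    -> "alice"
+  // plus the submitting queue's name, recorded via setQueueName(queue).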
+
+ /**
+ * Download the job configuration file from the FS.
+ *
+   * @param jobFile the path of the job configuration file on the FS
+   * @param user the user owning the job
+   * @param jobId id of the job the file belongs to
+ * @return the local file system path of the downloaded file.
+ * @throws IOException
+ */
+ private Path localizeJobConfFile(Path jobFile, String user, JobID jobId)
+ throws IOException, InterruptedException {
+ final JobConf conf = new JobConf(getJobConf());
+ FileSystem userFs = getFS(jobFile, jobId, conf);
+ // Get sizes of JobFile
+ // sizes are -1 if they are not present.
+ FileStatus status = null;
+ long jobFileSize = -1;
+ try {
+ status = userFs.getFileStatus(jobFile);
+ jobFileSize = status.getLen();
+ } catch(FileNotFoundException fe) {
+ jobFileSize = -1;
+ }
+
+ Path localJobFile =
+ lDirAlloc.getLocalPathForWrite(getLocalJobConfFile(user, jobId.toString()),
+ jobFileSize, fConf);
+
+ // Download job.xml
+ userFs.copyToLocalFile(jobFile, localJobFile);
+ return localJobFile;
+ }
+
+ /**
+ * Download the job jar file from FS to the local file system and unjar it.
+ * Set the local jar file in the passed configuration.
+ *
+   * @param user the user owning the job
+   * @param jobId id of the job whose jar is to be localized
+   * @param localFs the local file system to copy the jar to
+   * @param localJobConf the job configuration in which to record the local jar
+ * @throws IOException
+ */
+ private void localizeJobJarFile(String user, JobID jobId, FileSystem localFs,
+ JobConf localJobConf)
+ throws IOException, InterruptedException {
+ // copy Jar file to the local FS and unjar it.
+ String jarFile = localJobConf.getJar();
+ FileStatus status = null;
+ long jarFileSize = -1;
+ if (jarFile != null) {
+ Path jarFilePath = new Path(jarFile);
+ FileSystem fs = getFS(jarFilePath, jobId, localJobConf);
+ try {
+ status = fs.getFileStatus(jarFilePath);
+ jarFileSize = status.getLen();
+ } catch (FileNotFoundException fe) {
+ jarFileSize = -1;
+ }
+      // Here we ask for five times jarFileSize of local space to accommodate
+      // unjarring the jar file in the jars directory
+ Path localJarFile =
+ lDirAlloc.getLocalPathForWrite(
+ getJobJarFile(user, jobId.toString()), 5 * jarFileSize, fConf);
+
+ // Download job.jar
+ fs.copyToLocalFile(jarFilePath, localJarFile);
+
+ localJobConf.setJar(localJarFile.toString());
+
+ // Un-jar the parts of the job.jar that need to be added to the classpath
+ RunJar.unJar(
+ new File(localJarFile.toString()),
+ new File(localJarFile.getParent().toString()),
+ localJobConf.getJarUnpackPattern());
+ }
+ }
+
+ protected void launchTaskForJob(TaskInProgress tip, JobConf jobConf,
+ UserGroupInformation ugi) throws IOException {
+ synchronized (tip) {
+ tip.setJobConf(jobConf);
+ tip.setUGI(ugi);
+ tip.launchTask();
+ }
+ }
+
+ public synchronized void shutdown() throws IOException {
+ shuttingDown = true;
+ close();
+ if (this.server != null) {
+ try {
+ LOG.info("Shutting down StatusHttpServer");
+ this.server.stop();
+ } catch (Exception e) {
+ LOG.warn("Exception shutting down TaskTracker", e);
+ }
+ }
+ }
+ /**
+ * Close down the TaskTracker and all its components. We must also shutdown
+ * any running tasks or threads, and cleanup disk space. A new TaskTracker
+ * within the same process space might be restarted, so everything must be
+ * clean.
+ */
+ public synchronized void close() throws IOException {
+ //
+    // Kill running tasks. Do this in a 2nd collection, called 'tasksToClose',
+ // because calling jobHasFinished() may result in an edit to 'tasks'.
+ //
+ TreeMap<TaskAttemptID, TaskInProgress> tasksToClose =
+ new TreeMap<TaskAttemptID, TaskInProgress>();
+ tasksToClose.putAll(tasks);
+ for (TaskInProgress tip : tasksToClose.values()) {
+ tip.jobHasFinished(false);
+ }
+
+ this.running = false;
+
+ if (asyncDiskService != null) {
+ // Clear local storage
+ asyncDiskService.cleanupAllVolumes();
+
+ // Shutdown all async deletion threads with up to 10 seconds of delay
+ asyncDiskService.shutdown();
+ try {
+ if (!asyncDiskService.awaitTermination(10000)) {
+ asyncDiskService.shutdownNow();
+ asyncDiskService = null;
+ }
+ } catch (InterruptedException e) {
+ asyncDiskService.shutdownNow();
+ asyncDiskService = null;
+ }
+ }
+
+ // Shutdown the fetcher thread
+ this.mapEventsFetcher.interrupt();
+
+ //stop the launchers
+ this.mapLauncher.interrupt();
+ this.reduceLauncher.interrupt();
+
+ this.distributedCacheManager.stopCleanupThread();
+ jvmManager.stop();
+
+ // shutdown RPC connections
+ RPC.stopProxy(jobClient);
+
+ // wait for the fetcher thread to exit
+ for (boolean done = false; !done; ) {
+ try {
+ this.mapEventsFetcher.join();
+ done = true;
+ } catch (InterruptedException e) {
+ }
+ }
+
+ if (taskReportServer != null) {
+ taskReportServer.stop();
+ taskReportServer = null;
+ }
+ if (healthChecker != null) {
+ //stop node health checker service
+ healthChecker.stop();
+ healthChecker = null;
+ }
+ }
+
+ /**
+ * For testing
+ */
+ TaskTracker() {
+ server = null;
+ }
+
+ void setConf(JobConf conf) {
+ fConf = conf;
+ }
+
+ /**
+ * Start with the local machine name, and the default JobTracker
+ */
+ public TaskTracker(JobConf conf) throws IOException, InterruptedException {
+ fConf = conf;
+ maxMapSlots = conf.getInt(TT_MAP_SLOTS, 2);
+ maxReduceSlots = conf.getInt(TT_REDUCE_SLOTS, 2);
+ aclsManager = new ACLsManager(fConf, new JobACLsManager(fConf), null);
+ this.jobTrackAddr = JobTracker.getAddress(conf);
+ InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
+ conf.get(TT_HTTP_ADDRESS, "0.0.0.0:50060"));
+ String httpBindAddress = infoSocAddr.getHostName();
+ int httpPort = infoSocAddr.getPort();
+ this.server = new HttpServer("task", httpBindAddress, httpPort,
+ httpPort == 0, conf, aclsManager.getAdminsAcl());
+ workerThreads = conf.getInt(TT_HTTP_THREADS, 40);
+ this.shuffleServerMetrics = new ShuffleServerMetrics(conf);
+ server.setThreads(1, workerThreads);
+ // let the jsp pages get to the task tracker, config, and other relevant
+ // objects
+ FileSystem local = FileSystem.getLocal(conf);
+ this.localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
+ server.setAttribute("task.tracker", this);
+ server.setAttribute("local.file.system", local);
+ server.setAttribute("conf", conf);
+ server.setAttribute("log", LOG);
+ server.setAttribute("localDirAllocator", localDirAllocator);
+ server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);
+ String exceptionStackRegex = conf.get(JTConfig.SHUFFLE_EXCEPTION_STACK_REGEX);
+ String exceptionMsgRegex = conf.get(JTConfig.SHUFFLE_EXCEPTION_MSG_REGEX);
+ server.setAttribute("exceptionStackRegex", exceptionStackRegex);
+ server.setAttribute("exceptionMsgRegex", exceptionMsgRegex);
+ server.addInternalServlet("mapOutput", "/mapOutput", MapOutputServlet.class);
+ server.addServlet("taskLog", "/tasklog", TaskLogServlet.class);
+ server.start();
+ this.httpPort = server.getPort();
+ checkJettyPort(httpPort);
+ // create task log cleanup thread
+ setTaskLogCleanupThread(new UserLogCleaner(fConf));
+
+ UserGroupInformation.setConfiguration(fConf);
+ SecurityUtil.login(fConf, TTConfig.TT_KEYTAB_FILE, TTConfig.TT_USER_NAME);
+
+ initialize();
+ }
+
+ private void checkJettyPort(int port) throws IOException {
+ //See HADOOP-4744
+ if (port < 0) {
+ shuttingDown = true;
+ throw new IOException("Jetty problem. Jetty didn't bind to a " +
+ "valid port");
+ }
+ }
+
+ private void startCleanupThreads() throws IOException {
+ taskCleanupThread.setDaemon(true);
+ taskCleanupThread.start();
+ directoryCleanupThread = new CleanupQueue();
+ // start tasklog cleanup thread
+ taskLogCleanupThread.setDaemon(true);
+ taskLogCleanupThread.start();
+ }
+
+ // only used by tests
+ void setCleanupThread(CleanupQueue c) {
+ directoryCleanupThread = c;
+ }
+
+ CleanupQueue getCleanupThread() {
+ return directoryCleanupThread;
+ }
+
+ UserLogCleaner getTaskLogCleanupThread() {
+ return this.taskLogCleanupThread;
+ }
+
+ void setTaskLogCleanupThread(UserLogCleaner t) {
+ this.taskLogCleanupThread = t;
+ }
+
+ void setIndexCache(IndexCache cache) {
+ this.indexCache = cache;
+ }
+
+ /**
+ * The connection to the JobTracker, used by the TaskRunner
+ * for locating remote files.
+ */
+ public InterTrackerProtocol getJobClient() {
+ return jobClient;
+ }
+
+  /** Return the address to which the tasktracker report server is bound */
+ public synchronized InetSocketAddress getTaskTrackerReportAddress() {
+ return taskReportAddress;
+ }
+
+ /** Queries the job tracker for a set of outputs ready to be copied
+ * @param fromEventId the first event ID we want to start from, this is
+ * modified by the call to this method
+ * @param jobClient the job tracker
+ * @return a set of locations to copy outputs from
+ * @throws IOException
+ */
+ private List<TaskCompletionEvent> queryJobTracker(IntWritable fromEventId,
+ JobID jobId,
+ InterTrackerProtocol jobClient)
+ throws IOException {
+
+ TaskCompletionEvent t[] = jobClient.getTaskCompletionEvents(
+ jobId,
+ fromEventId.get(),
+ probe_sample_size);
+ //we are interested in map task completion events only. So store
+ //only those
+ List <TaskCompletionEvent> recentMapEvents =
+ new ArrayList<TaskCompletionEvent>();
+ for (int i = 0; i < t.length; i++) {
+ if (t[i].isMapTask()) {
+ recentMapEvents.add(t[i]);
+ }
+ }
+ fromEventId.set(fromEventId.get() + t.length);
+ return recentMapEvents;
+ }
+
+ /**
+ * Main service loop. Will stay in this loop forever.
+ */
+ State offerService() throws Exception {
+ long lastHeartbeat = 0;
+
+ while (running && !shuttingDown) {
+ try {
+ long now = System.currentTimeMillis();
+
+ long waitTime = heartbeatInterval - (now - lastHeartbeat);
+ if (waitTime > 0) {
+ // sleeps for the wait time or
+ // until there are empty slots to schedule tasks
+ synchronized (finishedCount) {
+ if (finishedCount.get() == 0) {
+ finishedCount.wait(waitTime);
+ }
+ finishedCount.set(0);
+ }
+ }
+
+ // If the TaskTracker is just starting up:
+ // 1. Verify the buildVersion
+ // 2. Get the system directory & filesystem
+ if(justInited) {
+ String jobTrackerBV = jobClient.getBuildVersion();
+ if(!VersionInfo.getBuildVersion().equals(jobTrackerBV)) {
+ String msg = "Shutting down. Incompatible buildVersion." +
+ "\nJobTracker's: " + jobTrackerBV +
+ "\nTaskTracker's: "+ VersionInfo.getBuildVersion();
+ LOG.error(msg);
+ try {
+ jobClient.reportTaskTrackerError(taskTrackerName, null, msg);
+ } catch(Exception e ) {
+ LOG.info("Problem reporting to jobtracker: " + e);
+ }
+ return State.DENIED;
+ }
+
+ String dir = jobClient.getSystemDir();
+ if (dir == null) {
+ throw new IOException("Failed to get system directory");
+ }
+ systemDirectory = new Path(dir);
+ systemFS = systemDirectory.getFileSystem(fConf);
+ }
+
+ // Send the heartbeat and process the jobtracker's directives
+ HeartbeatResponse heartbeatResponse = transmitHeartBeat(now);
+
+ // Note the time when the heartbeat returned, use this to decide when to send the
+ // next heartbeat
+ lastHeartbeat = System.currentTimeMillis();
+
+ TaskTrackerAction[] actions = heartbeatResponse.getActions();
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Got heartbeatResponse from JobTracker with responseId: " +
+ heartbeatResponse.getResponseId() + " and " +
+ ((actions != null) ? actions.length : 0) + " actions");
+ }
+ if (reinitTaskTracker(actions)) {
+ return State.STALE;
+ }
+
+ // resetting heartbeat interval from the response.
+ heartbeatInterval = heartbeatResponse.getHeartbeatInterval();
+ justStarted = false;
+ justInited = false;
+ if (actions != null){
+ for(TaskTrackerAction action: actions) {
+ if (action instanceof LaunchTaskAction) {
+ addToTaskQueue((LaunchTaskAction)action);
+ } else if (action instanceof CommitTaskAction) {
+ CommitTaskAction commitAction = (CommitTaskAction)action;
+ if (!commitResponses.contains(commitAction.getTaskID())) {
+ LOG.info("Received commit task action for " +
+ commitAction.getTaskID());
+ commitResponses.add(commitAction.getTaskID());
+ }
+ } else {
+ tasksToCleanup.put(action);
+ }
+ }
+ }
+ markUnresponsiveTasks();
+ killOverflowingTasks();
+
+ //we've cleaned up, resume normal operation
+ if (!acceptNewTasks && isIdle()) {
+ acceptNewTasks=true;
+ }
+ //The check below may not be required on every iteration but we are
+ //erring on the side of caution here. We have seen many cases where
+ //the call to jetty's getLocalPort() returns different values at
+ //different times, so we are being extra paranoid here.
+ checkJettyPort(server.getPort());
+ } catch (InterruptedException ie) {
+ LOG.info("Interrupted. Closing down.");
+ return State.INTERRUPTED;
+ } catch (DiskErrorException de) {
+ String msg = "Exiting task tracker for disk error:\n" +
+ StringUtils.stringifyException(de);
+ LOG.error(msg);
+ synchronized (this) {
+ jobClient.reportTaskTrackerError(taskTrackerName,
+ "DiskErrorException", msg);
+ }
+ return State.STALE;
+ } catch (RemoteException re) {
+ String reClass = re.getClassName();
+ if (DisallowedTaskTrackerException.class.getName().equals(reClass)) {
+ LOG.info("Tasktracker disallowed by JobTracker.");
+ return State.DENIED;
+ }
+ } catch (Exception except) {
+ String msg = "Caught exception: " +
+ StringUtils.stringifyException(except);
+ LOG.error(msg);
+ }
+ }
+
+ return State.NORMAL;
+ }
+
+ private long previousUpdate = 0;
+
+ /**
+ * Build and transmit the heartbeat to the JobTracker.
+ * @param now current time
+ * @return the {@link HeartbeatResponse} returned by the JobTracker
+ * @throws IOException
+ */
+ HeartbeatResponse transmitHeartBeat(long now) throws IOException {
+ // Send Counters in the status once every COUNTER_UPDATE_INTERVAL
+ boolean sendAllCounters;
+ if (now > (previousUpdate + COUNTER_UPDATE_INTERVAL)) {
+ sendAllCounters = true;
+ previousUpdate = now;
+ }
+ else {
+ sendAllCounters = false;
+ }
+
+ //
+ // Check if the last heartbeat got through...
+ // if so then build the heartbeat information for the JobTracker;
+ // else resend the previous status information.
+ //
+ if (status == null) {
+ synchronized (this) {
+ status = new TaskTrackerStatus(taskTrackerName, localHostname,
+ httpPort,
+ cloneAndResetRunningTaskStatuses(
+ sendAllCounters),
+ failures,
+ maxMapSlots,
+ maxReduceSlots);
+ }
+ } else {
+ LOG.info("Resending 'status' to '" + jobTrackAddr.getHostName() +
+ "' with reponseId '" + heartbeatResponseId);
+ }
+
+ //
+ // Check if we should ask for a new Task
+ //
+ boolean askForNewTask;
+ long localMinSpaceStart;
+ synchronized (this) {
+ askForNewTask =
+ ((status.countOccupiedMapSlots() < maxMapSlots ||
+ status.countOccupiedReduceSlots() < maxReduceSlots) &&
+ acceptNewTasks);
+ localMinSpaceStart = minSpaceStart;
+ }
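+ // A snapshot of the node's resources is attached to the heartbeat only
+ // when we are asking for a new task.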
+ if (askForNewTask) {
+ checkLocalDirs(fConf.getLocalDirs());
+ askForNewTask = enoughFreeSpace(localMinSpaceStart);
+ long freeDiskSpace = getFreeSpace();
+ long totVmem = getTotalVirtualMemoryOnTT();
+ long totPmem = getTotalPhysicalMemoryOnTT();
+ long availableVmem = getAvailableVirtualMemoryOnTT();
+ long availablePmem = getAvailablePhysicalMemoryOnTT();
+ long cumuCpuTime = getCumulativeCpuTimeOnTT();
+ long cpuFreq = getCpuFrequencyOnTT();
+ int numCpu = getNumProcessorsOnTT();
+ float cpuUsage = getCpuUsageOnTT();
+
+ status.getResourceStatus().setAvailableSpace(freeDiskSpace);
+ status.getResourceStatus().setTotalVirtualMemory(totVmem);
+ status.getResourceStatus().setTotalPhysicalMemory(totPmem);
+ status.getResourceStatus().setMapSlotMemorySizeOnTT(
+ mapSlotMemorySizeOnTT);
+ status.getResourceStatus().setReduceSlotMemorySizeOnTT(
+ reduceSlotSizeMemoryOnTT);
+ status.getResourceStatus().setAvailableVirtualMemory(availableVmem);
+ status.getResourceStatus().setAvailablePhysicalMemory(availablePmem);
+ status.getResourceStatus().setCumulativeCpuTime(cumuCpuTime);
+ status.getResourceStatus().setCpuFrequency(cpuFreq);
+ status.getResourceStatus().setNumProcessors(numCpu);
+ status.getResourceStatus().setCpuUsage(cpuUsage);
+ }
+ //add node health information
+
+ TaskTrackerHealthStatus healthStatus = status.getHealthStatus();
+ synchronized (this) {
+ if (healthChecker != null) {
+ healthChecker.setHealthStatus(healthStatus);
+ } else {
+ healthStatus.setNodeHealthy(true);
+ healthStatus.setLastReported(0L);
+ healthStatus.setHealthReport("");
+ }
+ }
+ //
+ // Xmit the heartbeat
+ //
+ HeartbeatResponse heartbeatResponse = jobClient.heartbeat(status,
+ justStarted,
+ justInited,
+ askForNewTask,
+ heartbeatResponseId);
+
+ //
+ // The heartbeat got through successfully!
+ //
+ heartbeatResponseId = heartbeatResponse.getResponseId();
+
+ synchronized (this) {
+ for (TaskStatus taskStatus : status.getTaskReports()) {
+ if (taskStatus.getRunState() != TaskStatus.State.RUNNING &&
+ taskStatus.getRunState() != TaskStatus.State.UNASSIGNED &&
+ taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING &&
+ !taskStatus.inTaskCleanupPhase()) {
+ if (taskStatus.getIsMap()) {
+ mapTotal--;
+ } else {
+ reduceTotal--;
+ }
+ try {
+ myInstrumentation.completeTask(taskStatus.getTaskID());
+ } catch (MetricsException me) {
+ LOG.warn("Caught: " + StringUtils.stringifyException(me));
+ }
+ runningTasks.remove(taskStatus.getTaskID());
+ }
+ }
+
+ // Clear transient status information which should only
+ // be sent once to the JobTracker
+ for (TaskInProgress tip: runningTasks.values()) {
+ tip.getStatus().clearStatus();
+ }
+ }
+
+ // Force a rebuild of 'status' on the next iteration
+ status = null;
+
+ return heartbeatResponse;
+ }
+
+ /**
+ * Return the total virtual memory available on this TaskTracker.
+ * @return total size of virtual memory.
+ */
+ long getTotalVirtualMemoryOnTT() {
+ return totalVirtualMemoryOnTT;
+ }
+
+ /**
+ * Return the total physical memory available on this TaskTracker.
+ * @return total size of physical memory.
+ */
+ long getTotalPhysicalMemoryOnTT() {
+ return totalPhysicalMemoryOnTT;
+ }
+
+ /**
+ * Return the free virtual memory available on this TaskTracker.
+ * @return total size of free virtual memory.
+ */
+ long getAvailableVirtualMemoryOnTT() {
+ long availableVirtualMemoryOnTT = TaskTrackerStatus.UNAVAILABLE;
+ if (resourceCalculatorPlugin != null) {
+ availableVirtualMemoryOnTT =
+ resourceCalculatorPlugin.getAvailableVirtualMemorySize();
+ }
+ return availableVirtualMemoryOnTT;
+ }
+
+ /**
+ * Return the free physical memory available on this TaskTracker.
+ * @return total size of free physical memory in bytes
+ */
+ long getAvailablePhysicalMemoryOnTT() {
+ long availablePhysicalMemoryOnTT = TaskTrackerStatus.UNAVAILABLE;
+ if (resourceCalculatorPlugin != null) {
+ availablePhysicalMemoryOnTT =
+ resourceCalculatorPlugin.getAvailablePhysicalMemorySize();
+ }
+ return availablePhysicalMemoryOnTT;
+ }
+
+ /**
+ * Return the cumulative CPU time used on this TaskTracker since the
+ * system started.
+ * @return cumulative CPU time used, in milliseconds
+ */
+ long getCumulativeCpuTimeOnTT() {
+ long cumulativeCpuTime = TaskTrackerStatus.UNAVAILABLE;
+ if (resourceCalculatorPlugin != null) {
+ cumulativeCpuTime = resourceCalculatorPlugin.getCumulativeCpuTime();
+ }
+ return cumulativeCpuTime;
+ }
+
+ /**
+ * Return the number of processors on this TaskTracker.
+ * @return number of processors
+ */
+ int getNumProcessorsOnTT() {
+ int numProcessors = TaskTrackerStatus.UNAVAILABLE;
+ if (resourceCalculatorPlugin != null) {
+ numProcessors = resourceCalculatorPlugin.getNumProcessors();
+ }
+ return numProcessors;
+ }
+
+ /**
+ * Return the CPU frequency of this TaskTracker
+ * @return CPU frequency in kHz
+ */
+ long getCpuFrequencyOnTT() {
+ long cpuFrequency = TaskTrackerStatus.UNAVAILABLE;
+ if (resourceCalculatorPlugin != null) {
+ cpuFrequency = resourceCalculatorPlugin.getCpuFrequency();
+ }
+ return cpuFrequency;
+ }
+
+ /**
+ * Return the CPU usage of this TaskTracker, as a percentage.
+ * @return CPU usage in %
+ */
+ float getCpuUsageOnTT() {
+ float cpuUsage = TaskTrackerStatus.UNAVAILABLE;
+ if (resourceCalculatorPlugin != null) {
+ cpuUsage = resourceCalculatorPlugin.getCpuUsage();
+ }
+ return cpuUsage;
+ }
+
+ long getTotalMemoryAllottedForTasksOnTT() {
+ return totalMemoryAllottedForTasks;
+ }
+
+ /**
+ * @return The amount of physical memory that will not be used for running
+ * tasks in bytes. Returns JobConf.DISABLED_MEMORY_LIMIT if it is not
+ * configured.
+ */
+ long getReservedPhysicalMemoryOnTT() {
+ return reservedPhysicalMemoryOnTT;
+ }
+
+ /**
+ * Check if the jobtracker directed a 'reset' of the tasktracker.
+ *
+ * @param actions the directives of the jobtracker for the tasktracker.
+ * @return <code>true</code> if tasktracker is to be reset,
+ * <code>false</code> otherwise.
+ */
+ private boolean reinitTaskTracker(TaskTrackerAction[] actions) {
+ if (actions != null) {
+ for (TaskTrackerAction action : actions) {
+ if (action.getActionId() ==
+ TaskTrackerAction.ActionType.REINIT_TRACKER) {
+ LOG.info("Received ReinitTrackerAction from JobTracker");
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Kill any tasks that have not reported progress within their per-job
+ * timeout.
+ */
+ private synchronized void markUnresponsiveTasks() throws IOException {
+ long now = System.currentTimeMillis();
+ for (TaskInProgress tip: runningTasks.values()) {
+ if (tip.getRunState() == TaskStatus.State.RUNNING ||
+ tip.getRunState() == TaskStatus.State.COMMIT_PENDING ||
+ tip.isCleaningup()) {
+ // Check the per-job timeout interval for tasks;
+ // an interval of '0' implies it is never timed-out
+ long jobTaskTimeout = tip.getTaskTimeout();
+ if (jobTaskTimeout == 0) {
+ continue;
+ }
+
+ // Check if the task has not reported progress for a
+ // time-period greater than the configured time-out
+ long timeSinceLastReport = now - tip.getLastProgressReport();
+ if (timeSinceLastReport > jobTaskTimeout && !tip.wasKilled) {
+ String msg =
+ "Task " + tip.getTask().getTaskID() + " failed to report status for "
+ + (timeSinceLastReport / 1000) + " seconds. Killing!";
+ LOG.info(tip.getTask().getTaskID() + ": " + msg);
+ ReflectionUtils.logThreadInfo(LOG, "lost task", 30);
+ tip.reportDiagnosticInfo(msg);
+ myInstrumentation.timedoutTask(tip.getTask().getTaskID());
+ dumpTaskStack(tip);
+ purgeTask(tip, true);
+ }
+ }
+ }
+ }
+
+ /**
+ * Builds list of PathDeletionContext objects for the given paths
+ */
+ private static PathDeletionContext[] buildPathDeletionContexts(FileSystem fs,
+ Path[] paths) {
+ int i = 0;
+ PathDeletionContext[] contexts = new PathDeletionContext[paths.length];
+
+ for (Path p : paths) {
+ contexts[i++] = new PathDeletionContext(fs, p.toUri().getPath());
+ }
+ return contexts;
+ }
+
+ /**
+ * Builds a list of {@link TaskControllerJobPathDeletionContext} objects
+ * for a job, each pointing to the job's jobLocalDir.
+ * @param fs : FileSystem in which the dirs are to be deleted
+ * @param paths : mapred-local-dirs
+ * @param id : {@link JobID} of the job for which the local-dirs need to
+ * be cleaned up.
+ * @param user : Job owner's username
+ * @param taskController : the task-controller to be used for deletion of
+ * jobLocalDir
+ */
+ static PathDeletionContext[] buildTaskControllerJobPathDeletionContexts(
+ FileSystem fs, Path[] paths, JobID id, String user,
+ TaskController taskController)
+ throws IOException {
+ int i = 0;
+ PathDeletionContext[] contexts =
+ new TaskControllerPathDeletionContext[paths.length];
+
+ for (Path p : paths) {
+ contexts[i++] = new TaskControllerJobPathDeletionContext(fs, p, id, user,
+ taskController);
+ }
+ return contexts;
+ }
+
+ /**
+ * Builds a list of TaskControllerTaskPathDeletionContext objects for a task.
+ * @param fs : FileSystem in which the dirs are to be deleted
+ * @param paths : mapred-local-dirs
+ * @param task : the task whose taskDir or taskWorkDir is going to be deleted
+ * @param isWorkDir : true if the dir to be deleted is the workDir,
+ * false if it is the taskDir
+ * @param taskController : the task-controller to be used for deletion of
+ * taskDir or taskWorkDir
+ */
+ static PathDeletionContext[] buildTaskControllerTaskPathDeletionContexts(
+ FileSystem fs, Path[] paths, Task task, boolean isWorkDir,
+ TaskController taskController)
+ throws IOException {
+ int i = 0;
+ PathDeletionContext[] contexts =
+ new TaskControllerPathDeletionContext[paths.length];
+
+ for (Path p : paths) {
+ contexts[i++] = new TaskControllerTaskPathDeletionContext(fs, p, task,
+ isWorkDir, taskController);
+ }
+ return contexts;
+ }
+
+ /**
+ * Send a signal to a stuck task commanding it to dump stack traces
+ * to stderr before we kill it with purgeTask().
+ *
+ * @param tip {@link TaskInProgress} to dump stack traces.
+ */
+ private void dumpTaskStack(TaskInProgress tip) {
+ TaskRunner runner = tip.getTaskRunner();
+ if (null == runner) {
+ return; // tip is already abandoned.
+ }
+
+ JvmManager jvmMgr = runner.getJvmManager();
+ jvmMgr.dumpStack(runner);
+ }
+
+ /**
+ * The task tracker is done with this job, so we need to clean up.
+ * @param action The action with the job
+ * @throws IOException
+ */
+ synchronized void purgeJob(KillJobAction action) throws IOException {
+ JobID jobId = action.getJobID();
+ LOG.info("Received 'KillJobAction' for job: " + jobId);
+ RunningJob rjob = null;
+ synchronized (runningJobs) {
+ rjob = runningJobs.get(jobId);
+ }
+
+ if (rjob == null) {
+ LOG.warn("Unknown job " + jobId + " being deleted.");
+ } else {
+ synchronized (rjob) {
+ // Add the tips of this job to the queue of tasks to be purged
+ for (TaskInProgress tip : rjob.tasks) {
+ tip.jobHasFinished(false);
+ Task t = tip.getTask();
+ if (t.isMapTask()) {
+ indexCache.removeMap(tip.getTask().getTaskID().toString());
+ }
+ }
+ // Delete the job directory if the job is done/failed
+ if (!rjob.keepJobFiles) {
+ removeJobFiles(rjob.jobConf.getUser(), rjob.getJobID());
+ }
+ // add job to taskLogCleanupThread
+ long now = System.currentTimeMillis();
+ taskLogCleanupThread.markJobLogsForDeletion(now, rjob.jobConf,
+ rjob.jobid);
+
+ // Remove this job
+ rjob.tasks.clear();
+ // Close all FileSystems for this job
+ try {
+ FileSystem.closeAllForUGI(rjob.getUGI());
+ } catch (IOException ie) {
+ LOG.warn("Ignoring exception " + StringUtils.stringifyException(ie) +
+ " while closing FileSystem for " + rjob.getUGI());
+ }
+ }
+ }
+
+ synchronized(runningJobs) {
+ runningJobs.remove(jobId);
+ }
+ getJobTokenSecretManager().removeTokenForJob(jobId.toString());
+ }
+
+ /**
+ * This job's files are no longer needed on this TT; remove them.
+ *
+ * @param user the job owner's username
+ * @param jobId the job whose local files are to be removed
+ * @throws IOException
+ */
+ void removeJobFiles(String user, JobID jobId)
+ throws IOException {
+ PathDeletionContext[] contexts =
+ buildTaskControllerJobPathDeletionContexts(localFs,
+ getLocalFiles(fConf, ""), jobId, user, taskController);
+ directoryCleanupThread.addToQueue(contexts);
+ }
+
+ /**
+ * Remove the tip and update all relevant state.
+ *
+ * @param tip {@link TaskInProgress} to be removed.
+ * @param wasFailure did the task fail or was it killed?
+ */
+ private void purgeTask(TaskInProgress tip, boolean wasFailure)
+ throws IOException {
+ if (tip != null) {
+ LOG.info("About to purge task: " + tip.getTask().getTaskID());
+
+ // Remove the task from running jobs,
+ // removing the job if it's the last task
+ removeTaskFromJob(tip.getTask().getJobID(), tip);
+ tip.jobHasFinished(wasFailure);
+ if (tip.getTask().isMapTask()) {
+ indexCache.removeMap(tip.getTask().getTaskID().toString());
+ }
+ }
+ }
+
+ /** Check if we're dangerously low on disk space.
+ * If so, kill tasks to free up space and make sure
+ * we don't accept any new tasks.
+ * Try killing reduce tasks first, since they likely
+ * use up the most space; then pick the one with the least progress.
+ */
+ private void killOverflowingTasks() throws IOException {
+ long localMinSpaceKill;
+ synchronized(this){
+ localMinSpaceKill = minSpaceKill;
+ }
+ if (!enoughFreeSpace(localMinSpaceKill)) {
+ acceptNewTasks=false;
+ //we give up! do not accept new tasks until
+ //all the ones running have finished and they're all cleared up
+ synchronized (this) {
+ TaskInProgress killMe = findTaskToKill(null);
+
+ if (killMe!=null) {
+ String msg = "Tasktracker running out of space." +
+ " Killing task.";
+ LOG.info(killMe.getTask().getTaskID() + ": " + msg);
+ killMe.reportDiagnosticInfo(msg);
+ purgeTask(killMe, false);
+ }
+ }
+ }
+ }
+
+ /**
+ * Pick a task to kill to free up memory/disk-space
+ * @param tasksToExclude tasks that are to be excluded while trying to find a
+ * task to kill. If null, all runningTasks will be searched.
+ * @return the task to kill or null, if one wasn't found
+ */
+ synchronized TaskInProgress findTaskToKill(List<TaskAttemptID> tasksToExclude) {
+ TaskInProgress killMe = null;
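+ // Preference order: any reduce task beats any map task; among tasks of
+ // the same type, the one with the least progress is chosen.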
+ for (TaskInProgress tip : runningTasks.values()) {
+
+ if (tasksToExclude != null
+ && tasksToExclude.contains(tip.getTask().getTaskID())) {
+ // exclude this task
+ continue;
+ }
+
+ if ((tip.getRunState() == TaskStatus.State.RUNNING ||
+ tip.getRunState() == TaskStatus.State.COMMIT_PENDING) &&
+ !tip.wasKilled) {
+
+ if (killMe == null) {
+ killMe = tip;
+
+ } else if (!tip.getTask().isMapTask()) {
+ //reduce task, give priority
+ if (killMe.getTask().isMapTask() ||
+ (tip.getTask().getProgress().get() <
+ killMe.getTask().getProgress().get())) {
+
+ killMe = tip;
+ }
+
+ } else if (killMe.getTask().isMapTask() &&
+ tip.getTask().getProgress().get() <
+ killMe.getTask().getProgress().get()) {
+ //map task, only add if the progress is lower
+
+ killMe = tip;
+ }
+ }
+ }
+ return killMe;
+ }
+
+ /**
+ * Check if any of the local directories has enough
+ * free space (more than minSpace).
+ *
+ * If not, do not try to get a new task assigned.
+ * @return true if at least one local directory has more than minSpace free
+ * @throws IOException
+ */
+ private boolean enoughFreeSpace(long minSpace) throws IOException {
+ if (minSpace == 0) {
+ return true;
+ }
+ return minSpace < getFreeSpace();
+ }
+
+ private long getFreeSpace() throws IOException {
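+ // Note: despite the name, this returns the largest free space on any
+ // single local dir, not the total across dirs; presumably because a
+ // task's data lives on a single volume.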
+ long biggestSeenSoFar = 0;
+ String[] localDirs = fConf.getLocalDirs();
+ for (int i = 0; i < localDirs.length; i++) {
+ DF df = null;
+ if (localDirsDf.containsKey(localDirs[i])) {
+ df = localDirsDf.get(localDirs[i]);
+ } else {
+ df = new DF(new File(localDirs[i]), fConf);
+ localDirsDf.put(localDirs[i], df);
+ }
+
+ long availOnThisVol = df.getAvailable();
+ if (availOnThisVol > biggestSeenSoFar) {
+ biggestSeenSoFar = availOnThisVol;
+ }
+ }
+
+ //Should ultimately hold back the space we expect running tasks to use but
+ //that estimate isn't currently being passed down to the TaskTrackers
+ return biggestSeenSoFar;
+ }
+
+ private TaskLauncher mapLauncher;
+ private TaskLauncher reduceLauncher;
+ public JvmManager getJvmManagerInstance() {
+ return jvmManager;
+ }
+
+ // called from unit test
+ void setJvmManagerInstance(JvmManager jvmManager) {
+ this.jvmManager = jvmManager;
+ }
+
+ private void addToTaskQueue(LaunchTaskAction action) {
+ if (action.getTask().isMapTask()) {
+ mapLauncher.addToTaskQueue(action);
+ } else {
+ reduceLauncher.addToTaskQueue(action);
+ }
+ }
+
+ // This method is called from unit tests
+ int getFreeSlots(boolean isMap) {
+ if (isMap) {
+ return mapLauncher.numFreeSlots.get();
+ } else {
+ return reduceLauncher.numFreeSlots.get();
+ }
+ }
+
+ class TaskLauncher extends Thread {
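+ // numFreeSlots doubles as the monitor for slot accounting:
+ // addFreeSlots() and notifySlots() call notifyAll() on it, and run()
+ // waits on it until enough slots are free.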
+ private IntWritable numFreeSlots;
+ private final int maxSlots;
+ private List<TaskInProgress> tasksToLaunch;
+
+ public TaskLauncher(TaskType taskType, int numSlots) {
+ this.maxSlots = numSlots;
+ this.numFreeSlots = new IntWritable(numSlots);
+ this.tasksToLaunch = new LinkedList<TaskInProgress>();
+ setDaemon(true);
+ setName("TaskLauncher for " + taskType + " tasks");
+ }
+
+ public void addToTaskQueue(LaunchTaskAction action) {
+ synchronized (tasksToLaunch) {
+ TaskInProgress tip = registerTask(action, this);
+ tasksToLaunch.add(tip);
+ tasksToLaunch.notifyAll();
+ }
+ }
+
+ public void cleanTaskQueue() {
+ tasksToLaunch.clear();
+ }
+
+ public void addFreeSlots(int numSlots) {
+ synchronized (numFreeSlots) {
+ numFreeSlots.set(numFreeSlots.get() + numSlots);
+ assert (numFreeSlots.get() <= maxSlots);
+ LOG.info("addFreeSlot : current free slots : " + numFreeSlots.get());
+ numFreeSlots.notifyAll();
+ }
+ }
+
+ void notifySlots() {
+ synchronized (numFreeSlots) {
+ numFreeSlots.notifyAll();
+ }
+ }
+
+ int getNumWaitingTasksToLaunch() {
+ synchronized (tasksToLaunch) {
+ return tasksToLaunch.size();
+ }
+ }
+
+ public void run() {
+ while (!Thread.interrupted()) {
+ try {
+ TaskInProgress tip;
+ Task task;
+ synchronized (tasksToLaunch) {
+ while (tasksToLaunch.isEmpty()) {
+ tasksToLaunch.wait();
+ }
+ //get the TIP
+ tip = tasksToLaunch.remove(0);
+ task = tip.getTask();
+ LOG.info("Trying to launch : " + tip.getTask().getTaskID() +
+ " which needs " + task.getNumSlotsRequired() + " slots");
+ }
+ //wait for free slots to run
+ synchronized (numFreeSlots) {
+ boolean canLaunch = true;
+ while (numFreeSlots.get() < task.getNumSlotsRequired()) {
+ //Make sure that there is no kill task action for this task!
+ //We are not locking tip here, because it would reverse the
+ //locking order!
+ //Also, a lock on the tip is not required here because:
+ // 1. runState of TaskStatus is volatile
+ // 2. No notification is missed, because notification is
+ // synchronized on numFreeSlots. So, while we are doing the check,
+ // if the tip is halfway through kill(), we don't miss the
+ // notification for the following wait().
+ if (!tip.canBeLaunched()) {
+ //got killed externally while still in the launcher queue
+ LOG.info("Not blocking slots for " + task.getTaskID()
+ + " as it got killed externally. Task's state is "
+ + tip.getRunState());
+ canLaunch = false;
+ break;
+ }
+ LOG.info("TaskLauncher : Waiting for " + task.getNumSlotsRequired() +
+ " to launch " + task.getTaskID() + ", currently we have " +
+ numFreeSlots.get() + " free slots");
+ numFreeSlots.wait();
+ }
+ if (!canLaunch) {
+ continue;
+ }
+ LOG.info("In TaskLauncher, current free slots : " + numFreeSlots.get()+
+ " and trying to launch "+tip.getTask().getTaskID() +
+ " which needs " + task.getNumSlotsRequired() + " slots");
+ numFreeSlots.set(numFreeSlots.get() - task.getNumSlotsRequired());
+ assert (numFreeSlots.get() >= 0);
+ }
+ synchronized (tip) {
+ //to make sure that there is no kill task action for this
+ if (!tip.canBeLaunched()) {
+ //got killed externally while still in the launcher queue
+ LOG.info("Not launching task " + task.getTaskID() + " as it got"
+ + " killed externally. Task's state is " + tip.getRunState());
+ addFreeSlots(task.getNumSlotsRequired());
+ continue;
+ }
+ tip.slotTaken = true;
+ }
+ //got a free slot. launch the task
+ startNewTask(tip);
+ } catch (InterruptedException e) {
+ return; // ALL DONE
+ } catch (Throwable th) {
+ LOG.error("TaskLauncher error " +
+ StringUtils.stringifyException(th));
+ }
+ }
+ }
+ }
+ private TaskInProgress registerTask(LaunchTaskAction action,
+ TaskLauncher launcher) {
+ Task t = action.getTask();
+ LOG.info("LaunchTaskAction (registerTask): " + t.getTaskID() +
+ " task's state:" + t.getState());
+ TaskInProgress tip = new TaskInProgress(t, this.fConf, launcher);
+ synchronized (this) {
+ tasks.put(t.getTaskID(), tip);
+ runningTasks.put(t.getTaskID(), tip);
+ boolean isMap = t.isMapTask();
+ if (isMap) {
+ mapTotal++;
+ } else {
+ reduceTotal++;
+ }
+ }
+ return tip;
+ }
+ /**
+ * Start a new task.
+ * All exceptions are handled locally, so that we don't mess up the
+ * task tracker.
+ */
+ void startNewTask(final TaskInProgress tip) {
+ Thread launchThread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ RunningJob rjob = localizeJob(tip);
+ // Localization is done. Neither rjob.jobConf nor rjob.ugi can be null
+ launchTaskForJob(tip, new JobConf(rjob.getJobConf()), rjob.ugi);
+ } catch (Throwable e) {
+ String msg = ("Error initializing " + tip.getTask().getTaskID() +
+ ":\n" + StringUtils.stringifyException(e));
+ LOG.warn(msg);
+ tip.reportDiagnosticInfo(msg);
+ try {
+ tip.kill(true);
+ tip.cleanup(true);
+ } catch (IOException ie2) {
+ LOG.info("Error cleaning up " + tip.getTask().getTaskID() + ":\n" +
+ StringUtils.stringifyException(ie2));
+ }
+ if (e instanceof Error) {
+ LOG.error("TaskLauncher error " +
+ StringUtils.stringifyException(e));
+ }
+ }
+ }
+ });
+ launchThread.start();
+
+ }
+
+ void addToMemoryManager(TaskAttemptID attemptId, boolean isMap,
+ JobConf conf) {
+ if (!isTaskMemoryManagerEnabled()) {
+ return; // Skip this if TaskMemoryManager is not enabled.
+ }
+ // Obtain physical memory limits from the job configuration
+ long physicalMemoryLimit =
+ conf.getLong(isMap ? MRJobConfig.MAP_MEMORY_PHYSICAL_MB :
+ MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB,
+ JobConf.DISABLED_MEMORY_LIMIT);
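+ // The limit is configured in MB; convert it to bytes.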
+ if (physicalMemoryLimit > 0) {
+ physicalMemoryLimit *= 1024L * 1024L;
+ }
+
+ // Obtain virtual memory limits from the job configuration
+ long virtualMemoryLimit = isMap ?
+ conf.getMemoryForMapTask() * 1024 * 1024 :
+ conf.getMemoryForReduceTask() * 1024 * 1024;
+
+ taskMemoryManager.addTask(attemptId, virtualMemoryLimit,
+ physicalMemoryLimit);
+ }
+
+ void removeFromMemoryManager(TaskAttemptID attemptId) {
+ // Remove the entry from taskMemoryManagerThread's data structures.
+ if (isTaskMemoryManagerEnabled()) {
+ taskMemoryManager.removeTask(attemptId);
+ }
+ }
+
+ /**
+ * Notify the tasktracker to send an out-of-band heartbeat.
+ */
+ private void notifyTTAboutTaskCompletion() {
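+ // offerService() waits on finishedCount between heartbeats; bumping it
+ // here wakes that wait so the next heartbeat goes out immediately.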
+ if (oobHeartbeatOnTaskCompletion) {
+ synchronized (finishedCount) {
+ int value = finishedCount.get();
+ finishedCount.set(value+1);
+ finishedCount.notify();
+ }
+ }
+ }
+
+ /**
+ * The server retry loop.
+ * This while-loop attempts to connect to the JobTracker. It only
+ * loops when the old TaskTracker has gone bad (its state is
+ * stale somehow) and we need to reinitialize everything.
+ */
+ public void run() {
+ try {
+ startCleanupThreads();
+ boolean denied = false;
+ while (running && !shuttingDown && !denied) {
+ boolean staleState = false;
+ try {
+ // This while-loop attempts reconnects if we get network errors
+ while (running && !staleState && !shuttingDown && !denied) {
+ try {
+ State osState = offerService();
+ if (osState == State.STALE) {
+ staleState = true;
+ } else if (osState == State.DENIED) {
+ denied = true;
+ }
+ } catch (Exception ex) {
+ if (!shuttingDown) {
+ LOG.info("Lost connection to JobTracker [" +
+ jobTrackAddr + "]. Retrying...", ex);
+ try {
+ Thread.sleep(5000);
+ } catch (InterruptedException ie) {
+ }
+ }
+ }
+ }
+ } finally {
+ close();
+ }
+ if (shuttingDown) { return; }
+ LOG.warn("Reinitializing local state");
+ initialize();
+ }
+ if (denied) {
+ shutdown();
+ }
+ } catch (IOException iex) {
+ LOG.error("Got fatal exception while reinitializing TaskTracker: " +
+ StringUtils.stringifyException(iex));
+ return;
+ }
+ catch (InterruptedException i) {
+ LOG.error("Got interrupted while reinitializing TaskTracker: " +
+ i.getMessage());
+ return;
+ }
+ }
+
+ ///////////////////////////////////////////////////////
+ // TaskInProgress maintains all the info for a Task that
+ // lives at this TaskTracker. It maintains the Task object,
+ // its TaskStatus, and the TaskRunner.
+ ///////////////////////////////////////////////////////
+ class TaskInProgress {
+ Task task;
+ long lastProgressReport;
+ StringBuffer diagnosticInfo = new StringBuffer();
+ private TaskRunner runner;
+ volatile boolean done = false;
+ volatile boolean wasKilled = false;
+ private JobConf defaultJobConf;
+ private JobConf localJobConf;
+ private boolean keepFailedTaskFiles;
+ private boolean alwaysKeepTaskFiles;
+ private TaskStatus taskStatus;
+ private long taskTimeout;
+ private String debugCommand;
+ private volatile boolean slotTaken = false;
+ private TaskLauncher launcher;
+
+ // The ugi of the user who is running the job. It also contains all the
+ // tokens, which are populated during job-localization
+ private UserGroupInformation ugi;
+
+ UserGroupInformation getUGI() {
+ return ugi;
+ }
+
+ void setUGI(UserGroupInformation userUGI) {
+ ugi = userUGI;
+ }
+
+ /**
+ * Create a TaskInProgress with no associated TaskLauncher.
+ */
+ public TaskInProgress(Task task, JobConf conf) {
+ this(task, conf, null);
+ }
+
+ public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) {
+ this.task = task;
+ this.launcher = launcher;
+ this.lastProgressReport = System.currentTimeMillis();
+ this.defaultJobConf = conf;
+ localJobConf = null;
+ taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(),
+ 0.0f,
+ task.getNumSlotsRequired(),
+ task.getState(),
+ diagnosticInfo.toString(),
+ "initializing",
+ getName(),
+ task.isTaskCleanupTask() ?
+ TaskStatus.Phase.CLEANUP :
+ task.isMapTask()? TaskStatus.Phase.MAP:
+ TaskStatus.Phase.SHUFFLE,
+ task.getCounters());
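+ // Default timeout of 10 minutes; overridden by the job's
+ // MRJobConfig.TASK_TIMEOUT in setJobConf().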
+ taskTimeout = (10 * 60 * 1000);
+ }
+
+ void localizeTask(Task task) throws IOException{
+
+ FileSystem localFs = FileSystem.getLocal(fConf);
+
+ // create taskDirs on all the disks.
+ getLocalizer().initializeAttemptDirs(task.getUser(),
+ task.getJobID().toString(), task.getTaskID().toString(),
+ task.isTaskCleanupTask());
+
+ // create the working-directory of the task
+ Path cwd =
+ lDirAlloc.getLocalPathForWrite(getTaskWorkDir(task.getUser(), task
+ .getJobID().toString(), task.getTaskID().toString(), task
+ .isTaskCleanupTask()), defaultJobConf);
+ if (!localFs.mkdirs(cwd)) {
+ throw new IOException("Mkdirs failed to create "
+ + cwd.toString());
+ }
+
+ localJobConf.set(LOCAL_DIR,
+ fConf.get(LOCAL_DIR));
+
+ if (fConf.get(TT_HOST_NAME) != null) {
+ localJobConf.set(TT_HOST_NAME, fConf.get(TT_HOST_NAME));
+ }
+
+ keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles();
+
+ // Do the task-type specific localization
+ task.localizeConfiguration(localJobConf);
+
+ List<String[]> staticResolutions = NetUtils.getAllStaticResolutions();
+ if (staticResolutions != null && staticResolutions.size() > 0) {
+ StringBuffer str = new StringBuffer();
+
+ for (int i = 0; i < staticResolutions.size(); i++) {
+ String[] hostToResolved = staticResolutions.get(i);
+ str.append(hostToResolved[0]+"="+hostToResolved[1]);
+ if (i != staticResolutions.size() - 1) {
+ str.append(',');
+ }
+ }
+ localJobConf.set(TT_STATIC_RESOLUTIONS, str.toString());
+ }
+ if (task.isMapTask()) {
+ debugCommand = localJobConf.getMapDebugScript();
+ } else {
+ debugCommand = localJobConf.getReduceDebugScript();
+ }
+ String keepPattern = localJobConf.getKeepTaskFilesPattern();
+ if (keepPattern != null) {
+ alwaysKeepTaskFiles =
+ Pattern.matches(keepPattern, task.getTaskID().toString());
+ } else {
+ alwaysKeepTaskFiles = false;
+ }
+ if (debugCommand != null || localJobConf.getProfileEnabled() ||
+ alwaysKeepTaskFiles || keepFailedTaskFiles) {
+ //disable jvm reuse
+ localJobConf.setNumTasksToExecutePerJvm(1);
+ }
+ task.setConf(localJobConf);
+ }
+
+ /**
+ * Get the task being run.
+ */
+ public Task getTask() {
+ return task;
+ }
+
+ TaskRunner getTaskRunner() {
+ return runner;
+ }
+
+ void setTaskRunner(TaskRunner rnr) {
+ this.runner = rnr;
+ }
+
+ public synchronized void setJobConf(JobConf lconf){
+ this.localJobConf = lconf;
+ keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles();
+ taskTimeout = localJobConf.getLong(MRJobConfig.TASK_TIMEOUT,
+ 10 * 60 * 1000);
+ }
+
+ public synchronized JobConf getJobConf() {
+ return localJobConf;
+ }
+
+ /**
+ * Get the task's status, draining any accumulated diagnostic info.
+ */
+ public synchronized TaskStatus getStatus() {
+ taskStatus.setDiagnosticInfo(diagnosticInfo.toString());
+ if (diagnosticInfo.length() > 0) {
+ diagnosticInfo = new StringBuffer();
+ }
+
+ return taskStatus;
+ }
+
+ private TaskRunner createRunner(TaskTracker tracker, TaskInProgress task)
+ throws IOException {
+ if (task.getTask().isMapTask()) {
+ return new MapTaskRunner(task, tracker, task.getJobConf());
+ }
+ return new ReduceTaskRunner(task, tracker, task.getJobConf());
+ }
+
+ /**
+ * Kick off the task execution
+ */
+ public synchronized void launchTask() throws IOException {
+ if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED ||
+ this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
+ this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) {
+ localizeTask(task);
+ if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) {
+ this.taskStatus.setRunState(TaskStatus.State.RUNNING);
+ }
+ setTaskRunner(createRunner(TaskTracker.this, this));
+ this.runner.start();
+ this.taskStatus.setStartTime(System.currentTimeMillis());
+ } else {
+ LOG.info("Not launching task: " + task.getTaskID() +
+ " since it's state is " + this.taskStatus.getRunState());
+ }
+ }
+
+ boolean isCleaningup() {
+ return this.taskStatus.inTaskCleanupPhase();
+ }
+
+ // checks if state has been changed for the task to be launched
+ boolean canBeLaunched() {
+ return (getRunState() == TaskStatus.State.UNASSIGNED ||
+ getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
+ getRunState() == TaskStatus.State.KILLED_UNCLEAN);
+ }
+
+ /**
+ * The task is reporting its progress
+ */
+ public synchronized void reportProgress(TaskStatus taskStatus)
+ {
+ LOG.info(task.getTaskID() + " " + taskStatus.getProgress() +
+ "% " + taskStatus.getStateString());
+ // task will report its state as
+ // COMMIT_PENDING when it is waiting for commit response and
+ // when it is committing.
+ // cleanup attempt will report its state as FAILED_UNCLEAN/KILLED_UNCLEAN
+ if (this.done ||
+ (this.taskStatus.getRunState() != TaskStatus.State.RUNNING &&
+ this.taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING &&
+ !isCleaningup()) ||
+ ((this.taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING ||
+ this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
+ this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) &&
+ taskStatus.getRunState() == TaskStatus.State.RUNNING)) {
+ //make sure we ignore progress messages after a task has
+ //invoked TaskUmbilicalProtocol.done() or if the task has been
+ //KILLED/FAILED/FAILED_UNCLEAN/KILLED_UNCLEAN
+ //Also ignore progress update if the state change is from
+ //COMMIT_PENDING/FAILED_UNCLEAN/KILLED_UNCLEAN to RUNNING
+ LOG.info(task.getTaskID() + " Ignoring status-update since " +
+ ((this.done) ? "task is 'done'" :
+ ("runState: " + this.taskStatus.getRunState()))
+ );
+ return;
+ }
+
+ this.taskStatus.statusUpdate(taskStatus);
+ this.lastProgressReport = System.currentTimeMillis();
+ }
+
+ /**
+ * Get the time of the last progress report.
+ */
+ public long getLastProgressReport() {
+ return lastProgressReport;
+ }
+
+ /**
+ * Get the task's current run state.
+ */
+ public TaskStatus.State getRunState() {
+ return taskStatus.getRunState();
+ }
+
+ /**
+ * The task's configured timeout.
+ *
+ * @return the task's configured timeout.
+ */
+ public long getTaskTimeout() {
+ return taskTimeout;
+ }
+
+ /**
+ * The task has reported some diagnostic info about its status
+ */
+ public synchronized void reportDiagnosticInfo(String info) {
+ this.diagnosticInfo.append(info);
+ }
+
+ public synchronized void reportNextRecordRange(SortedRanges.Range range) {
+ this.taskStatus.setNextRecordRange(range);
+ }
+
+ /**
+ * The task is reporting that it's done running
+ */
+ public synchronized void reportDone() {
+ if (isCleaningup()) {
+ if (this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) {
+ this.taskStatus.setRunState(TaskStatus.State.FAILED);
+ } else if (this.taskStatus.getRunState() ==
+ TaskStatus.State.KILLED_UNCLEAN) {
+ this.taskStatus.setRunState(TaskStatus.State.KILLED);
+ }
+ } else {
+ this.taskStatus.setRunState(TaskStatus.State.SUCCEEDED);
+ }
+ this.taskStatus.setProgress(1.0f);
+ this.taskStatus.setFinishTime(System.currentTimeMillis());
+ this.done = true;
+ jvmManager.taskFinished(runner);
+ runner.signalDone();
+ LOG.info("Task " + task.getTaskID() + " is done.");
+ LOG.info("reported output size for " + task.getTaskID() + " was " + taskStatus.getOutputSize());
+ myInstrumentation.statusUpdate(task, taskStatus);
+ }
+
+ public boolean wasKilled() {
+ return wasKilled;
+ }
+
+ /**
+ * A task is reporting in as 'done'.
+ *
+ * We need to notify the tasktracker to send an out-of-band heartbeat.
+ * If the commit isn't pending, we need to finalize the task
+ * and release the slot it occupies.
+ *
+ * @param commitPending is the task-commit pending?
+ */
+ void reportTaskFinished(boolean commitPending) {
+ if (!commitPending) {
+ try {
+ taskFinished();
+ } finally {
+ releaseSlot();
+ }
+ }
+ notifyTTAboutTaskCompletion();
+ }
+
+ /* State changes:
+ * RUNNING/COMMIT_PENDING -> FAILED_UNCLEAN/FAILED/KILLED_UNCLEAN/KILLED
+ * FAILED_UNCLEAN -> FAILED
+ * KILLED_UNCLEAN -> KILLED
+ */
+ private void setTaskFailState(boolean wasFailure) {
+ // go FAILED_UNCLEAN -> FAILED and KILLED_UNCLEAN -> KILLED always
+ if (taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) {
+ taskStatus.setRunState(TaskStatus.State.FAILED);
+ } else if (taskStatus.getRunState() ==
+ TaskStatus.State.KILLED_UNCLEAN) {
+ taskStatus.setRunState(TaskStatus.State.KILLED);
+ } else if (task.isMapOrReduce() &&
+ taskStatus.getPhase() != TaskStatus.Phase.CLEANUP) {
+ if (wasFailure) {
+ taskStatus.setRunState(TaskStatus.State.FAILED_UNCLEAN);
+ } else {
+ taskStatus.setRunState(TaskStatus.State.KILLED_UNCLEAN);
+ }
+ } else {
+ if (wasFailure) {
+ taskStatus.setRunState(TaskStatus.State.FAILED);
+ } else {
+ taskStatus.setRunState(TaskStatus.State.KILLED);
+ }
+ }
+ }
+
+ /**
+ * The task has actually finished running.
+ */
+ public void taskFinished() {
+ long start = System.currentTimeMillis();
+
+ //
+ // Wait until task reports as done. If it hasn't reported in,
+ // wait for a second and try again.
+ //
+ while (!done && (System.currentTimeMillis() - start < WAIT_FOR_DONE)) {
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException ie) {
+ }
+ }
+
+ //
+ // Change state to success or failure, depending on whether
+ // task was 'done' before terminating
+ //
+ boolean needCleanup = false;
+ synchronized (this) {
+ // Remove the task from MemoryManager, if the task SUCCEEDED or FAILED.
+ // KILLED tasks are removed in method kill(), because Kill
+ // would result in launching a cleanup attempt before
+ // TaskRunner returns; if the removal happened here, it would remove
+ // the wrong task from the memory manager.
+ if (done || !wasKilled) {
+ removeFromMemoryManager(task.getTaskID());
+ }
+ if (!done) {
+ if (!wasKilled) {
+ failures += 1;
+ setTaskFailState(true);
+ // call the script here for the failed tasks.
+ if (debugCommand != null) {
+ try {
+ runDebugScript();
+ } catch (Exception e) {
+ String msg =
+ "Debug-script could not be run successfully : "
+ + StringUtils.stringifyException(e);
+ LOG.warn(msg);
+ reportDiagnosticInfo(msg);
+ }
+ }
+ }
+ taskStatus.setProgress(0.0f);
+ }
+ this.taskStatus.setFinishTime(System.currentTimeMillis());
+ needCleanup = (taskStatus.getRunState() == TaskStatus.State.FAILED ||
+ taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
+ taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN ||
+ taskStatus.getRunState() == TaskStatus.State.KILLED);
+ }
+
+ //
+ // If the task has failed, or if the task was killAndCleanup()'ed,
+ // we should clean up right away. We only wait to cleanup
+ // if the task succeeded, and its results might be useful
+ // later on to downstream job processing.
+ //
+ if (needCleanup) {
+ removeTaskFromJob(task.getJobID(), this);
+ }
+
+ cleanup(needCleanup);
+ }
+
+ /**
+ * Run the debug script now. Because the debug script can be user code,
+ * we use {@link TaskController} to execute it.
+ *
+ * @throws IOException
+ */
+ private void runDebugScript() throws IOException {
+ String taskStdout ="";
+ String taskStderr ="";
+ String taskSyslog ="";
+ String jobConf = task.getJobFile();
+ try {
+ // get task's stdout file
+ taskStdout = FileUtil
+ .makeShellPath(TaskLog.getRealTaskLogFileLocation(task.getTaskID(),
+ task.isTaskCleanupTask(), TaskLog.LogName.STDOUT));
+ // get task's stderr file
+ taskStderr = FileUtil
+ .makeShellPath(TaskLog.getRealTaskLogFileLocation(task.getTaskID(),
+ task.isTaskCleanupTask(), TaskLog.LogName.STDERR));
+ // get task's syslog file
+ taskSyslog = FileUtil
+ .makeShellPath(TaskLog.getRealTaskLogFileLocation(task.getTaskID(),
+ task.isTaskCleanupTask(), TaskLog.LogName.SYSLOG));
+ } catch(Exception e){
+ LOG.warn("Exception finding task's stdout/err/syslog files", e);
+ }
+ File workDir = new File(lDirAlloc.getLocalPathToRead(
+ TaskTracker.getLocalTaskDir(task.getUser(), task.getJobID()
+ .toString(), task.getTaskID().toString(), task
+ .isTaskCleanupTask())
+ + Path.SEPARATOR + MRConstants.WORKDIR, localJobConf).toString());
+ // Build the command
+ File stdout = TaskLog.getTaskLogFile(task.getTaskID(), task
+ .isTaskCleanupTask(), TaskLog.LogName.DEBUGOUT);
+ // add pipes program as argument if it exists.
+ String program ="";
+ String executable = Submitter.getExecutable(localJobConf);
+ if ( executable != null) {
+ try {
+ program = new URI(executable).getFragment();
+ } catch (URISyntaxException ur) {
+ LOG.warn("Problem in the URI fragment for pipes executable");
+ }
+ }
+ String [] debug = debugCommand.split(" ");
+ List<String> vargs = new ArrayList<String>();
+ for (String component : debug) {
+ vargs.add(component);
+ }
+ vargs.add(taskStdout);
+ vargs.add(taskStderr);
+ vargs.add(taskSyslog);
+ vargs.add(jobConf);
+ vargs.add(program);
+ DebugScriptContext context =
+ new TaskController.DebugScriptContext();
+ context.args = vargs;
+ context.stdout = stdout;
+ context.workDir = workDir;
+ context.task = task;
+ getTaskController().runDebugScript(context);
+ // add the lines of debug out to diagnostics
+ int num = localJobConf.getInt(MRJobConfig.TASK_DEBUGOUT_LINES, -1);
+ addDiagnostics(FileUtil.makeShellPath(stdout), num, "DEBUG OUT");
+ }
+
+ /**
+ * Add the last 'num' lines of the given file to the diagnostics.
+ * If num is -1, all the lines of the file are added to the diagnostics.
+ * @param file The file from which to collect diagnostics.
+ * @param num The number of lines to be sent to diagnostics.
+ * @param tag The tag is printed before the diagnostics are printed.
+ */
+ public void addDiagnostics(String file, int num, String tag) {
+ RandomAccessFile rafile = null;
+ try {
+ rafile = new RandomAccessFile(file, "r");
+ int numLines = 0;
+ String line = null;
+ StringBuffer tail = new StringBuffer();
+ tail.append("\n-------------------- " + tag + "---------------------\n");
+ String[] lines = null;
+ if (num > 0) {
+ lines = new String[num];
+ }
+ while ((line = rafile.readLine()) != null) {
+ numLines++;
+ if (num > 0) {
+ if (numLines <= num) {
+ lines[numLines - 1] = line;
+ } else { // shift them up
+ for (int i = 0; i < num - 1; ++i) {
+ lines[i] = lines[i + 1];
+ }
+ lines[num - 1] = line;
+ }
+ } else if (num == -1) {
+ tail.append(line);
+ tail.append("\n");
+ }
+ }
+ int n = numLines > num ? num : numLines;
+ if (num > 0) {
+ for (int i = 0; i < n; i++) {
+ tail.append(lines[i]);
+ tail.append("\n");
+ }
+ }
+ if (n != 0) {
+ reportDiagnosticInfo(tail.toString());
+ }
+ } catch (FileNotFoundException fnfe){
+ LOG.warn("File "+file+ " not found");
+ } catch (IOException ioe){
+ LOG.warn("Error reading file "+file);
+ } finally {
+ try {
+ if (rafile != null) {
+ rafile.close();
+ }
+ } catch (IOException ioe) {
+ LOG.warn("Error closing file "+file);
+ }
+ }
+ }
+
+ /**
+ * We no longer need anything from this task, as the job has
+ * finished. If the task is still running, kill it and clean up.
+ *
+ * @param wasFailure did the task fail, as opposed to was it killed by
+ * the framework
+ */
+ public void jobHasFinished(boolean wasFailure) throws IOException {
+ // Kill the task if it is still running
+ synchronized(this){
+ if (getRunState() == TaskStatus.State.RUNNING ||
+ getRunState() == TaskStatus.State.UNASSIGNED ||
+ getRunState() == TaskStatus.State.COMMIT_PENDING ||
+ isCleaningup()) {
+ kill(wasFailure);
+ }
+ }
+
+ // Cleanup on the finished task
+ cleanup(true);
+ }
+
+ /**
+ * Something went wrong and the task must be killed.
+ *
+ * @param wasFailure was it a failure (versus a kill request)?
+ */
+ public synchronized void kill(boolean wasFailure) throws IOException {
+ if (taskStatus.getRunState() == TaskStatus.State.RUNNING ||
+ taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING ||
+ isCleaningup()) {
+ wasKilled = true;
+ if (wasFailure) {
+ failures += 1;
+ }
+ // runner could be null if task-cleanup attempt is not localized yet
+ if (runner != null) {
+ runner.kill();
+ }
+ setTaskFailState(wasFailure);
+ } else if (taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) {
+ if (wasFailure) {
+ failures += 1;
+ taskStatus.setRunState(TaskStatus.State.FAILED);
+ } else {
+ taskStatus.setRunState(TaskStatus.State.KILLED);
+ }
+ }
+ taskStatus.setFinishTime(System.currentTimeMillis());
+ removeFromMemoryManager(task.getTaskID());
+ releaseSlot();
+ myInstrumentation.statusUpdate(task, taskStatus);
+ notifyTTAboutTaskCompletion();
+ }
+
+ private synchronized void releaseSlot() {
+ if (slotTaken) {
+ if (launcher != null) {
+ launcher.addFreeSlots(task.getNumSlotsRequired());
+ }
+ slotTaken = false;
+ } else {
+ // wake up the launcher. it may be waiting to block slots for this task.
+ if (launcher != null) {
+ launcher.notifySlots();
+ }
+ }
+ }
+
+ /**
+ * The map output has been lost.
+ */
+ private synchronized void mapOutputLost(String failure
+ ) throws IOException {
+ if (taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING ||
+ taskStatus.getRunState() == TaskStatus.State.SUCCEEDED) {
+ // change status to failure
+ LOG.info("Reporting output lost:"+task.getTaskID());
+ taskStatus.setRunState(TaskStatus.State.FAILED);
+ taskStatus.setProgress(0.0f);
+ reportDiagnosticInfo("Map output lost, rescheduling: " +
+ failure);
+ runningTasks.put(task.getTaskID(), this);
+ mapTotal++;
+ myInstrumentation.statusUpdate(task, taskStatus);
+ } else {
+ LOG.warn("Output already reported lost:"+task.getTaskID());
+ }
+ }
+
+ /**
+ * We no longer need anything from this task. Either the
+ * controlling job is all done and the files have been copied
+ * away, or the task failed and we don't need the remains.
+ * Any calls to cleanup should not lock the tip first;
+ * cleanup does the right thing: it updates tasks in the TaskTracker
+ * by locking the tasktracker first and then locking the tip.
+ *
+ * If needCleanup is true, the whole task directory is cleaned up;
+ * otherwise only the current working directory of the task,
+ * i.e. <taskid>/work, is cleaned up.
+ */
+ void cleanup(boolean needCleanup) {
+ TaskAttemptID taskId = task.getTaskID();
+ LOG.debug("Cleaning up " + taskId);
+
+
+ synchronized (TaskTracker.this) {
+ if (needCleanup) {
+ // See if the tasks data structure is holding this tip.
+ // tasks could hold the tip for a cleanup attempt, if the cleanup
+ // attempt got launched before this method ran.
+ if (tasks.get(taskId) == this) {
+ tasks.remove(taskId);
+ }
+ }
+ synchronized (this){
+ if (alwaysKeepTaskFiles ||
+ (taskStatus.getRunState() == TaskStatus.State.FAILED &&
+ keepFailedTaskFiles)) {
+ return;
+ }
+ }
+ }
+ synchronized (this) {
+ // localJobConf could be null if localization has not happened
+ // then no cleanup will be required.
+ if (localJobConf == null) {
+ return;
+ }
+ try {
+ removeTaskFiles(needCleanup, taskId);
+ } catch (Throwable ie) {
+ LOG.info("Error cleaning up task runner: "
+ + StringUtils.stringifyException(ie));
+ }
+ }
+ }
+
+ /**
+ * Some or all of the files from this task are no longer required. Remove
+ * them via CleanupQueue.
+ *
+ * @param needCleanup
+ * @param taskId
+ * @throws IOException
+ */
+ void removeTaskFiles(boolean needCleanup, TaskAttemptID taskId)
+ throws IOException {
+ if (needCleanup) {
+ if (runner != null) {
+ // cleans up the output directory of the task (where map outputs
+ // and reduce inputs get stored)
+ runner.close();
+ }
+
+ if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
+ // No jvm reuse, remove everything
+ PathDeletionContext[] contexts =
+ buildTaskControllerTaskPathDeletionContexts(localFs,
+ getLocalFiles(fConf, ""), task, false/* not workDir */,
+ taskController);
+ directoryCleanupThread.addToQueue(contexts);
+ } else {
+ // Jvm reuse. We don't delete the workdir since some other task
+ // (running in the same JVM) might be using the dir. The JVM
+ // running the tasks would clean the workdir per a task in the
+ // task process itself.
+ String localTaskDir =
+ getLocalTaskDir(task.getUser(), task.getJobID().toString(), taskId
+ .toString(), task.isTaskCleanupTask());
+ PathDeletionContext[] contexts = buildPathDeletionContexts(
+ localFs, getLocalFiles(defaultJobConf, localTaskDir +
+ Path.SEPARATOR + TaskTracker.JOBFILE));
+ directoryCleanupThread.addToQueue(contexts);
+ }
+ } else {
+ if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
+ PathDeletionContext[] contexts =
+ buildTaskControllerTaskPathDeletionContexts(localFs,
+ getLocalFiles(fConf, ""), task, true /* workDir */,
+ taskController);
+ directoryCleanupThread.addToQueue(contexts);
+ }
+ }
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return (obj instanceof TaskInProgress) &&
+ task.getTaskID().equals
+ (((TaskInProgress) obj).getTask().getTaskID());
+ }
+
+ @Override
+ public int hashCode() {
+ return task.getTaskID().hashCode();
+ }
+ }
+
+ /**
+ * Check that the calling JVM's UGI is authorized to report
+ * for this particular job.
+ *
+ * @throws IOException for unauthorized access
+ */
+ private void ensureAuthorizedJVM(JobID jobId) throws IOException {
+ String currentJobId =
+ UserGroupInformation.getCurrentUser().getUserName();
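+ // The child JVM appears to log in with the job id as its UGI user
+ // name, so an authorized caller's user name must equal the job id.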
+ if (!currentJobId.equals(jobId.toString())) {
+ throw new IOException ("JVM with " + currentJobId +
+ " is not authorized for " + jobId);
+ }
+ }
+
+
+ // ///////////////////////////////////////////////////////////////
+ // TaskUmbilicalProtocol
+ /////////////////////////////////////////////////////////////////
+
+ /**
+ * Called upon startup by the child process, to fetch Task data.
+ */
+ public synchronized JvmTask getTask(JvmContext context)
+ throws IOException {
+ ensureAuthorizedJVM(context.jvmId.getJobId());
+ JVMId jvmId = context.jvmId;
+
+ // save pid of task JVM sent by child
+ jvmManager.setPidToJvm(jvmId, context.pid);
+
+ LOG.debug("JVM with ID : " + jvmId + " asked for a task");
+ if (!jvmManager.isJvmKnown(jvmId)) {
+ LOG.info("Killing unknown JVM " + jvmId);
+ return new JvmTask(null, true);
+ }
+ RunningJob rjob = runningJobs.get(jvmId.getJobId());
+ if (rjob == null) { //kill the JVM since the job is dead
+ LOG.info("Killing JVM " + jvmId + " since job " + jvmId.getJobId() +
+ " is dead");
+ jvmManager.killJvm(jvmId);
+ return new JvmTask(null, true);
+ }
+ TaskInProgress tip = jvmManager.getTaskForJvm(jvmId);
+ if (tip == null) {
+ return new JvmTask(null, false);
+ }
+ if (tasks.get(tip.getTask().getTaskID()) != null) { //is task still present
+ LOG.info("JVM with ID: " + jvmId + " given task: " +
+ tip.getTask().getTaskID());
+ return new JvmTask(tip.getTask(), false);
+ } else {
+ LOG.info("Killing JVM with ID: " + jvmId + " since scheduled task: " +
+ tip.getTask().getTaskID() + " is " + tip.taskStatus.getRunState());
+ return new JvmTask(null, true);
+ }
+ }
+
+ /**
+ * Called periodically to report Task progress, from 0.0 to 1.0.
+ */
+ public synchronized boolean statusUpdate(TaskAttemptID taskid,
+ TaskStatus taskStatus)
+ throws IOException {
+ ensureAuthorizedJVM(taskid.getJobID());
+ TaskInProgress tip = tasks.get(taskid);
+ if (tip != null) {
+ tip.reportProgress(taskStatus);
+ myInstrumentation.statusUpdate(tip.getTask(), taskStatus);
+ return true;
+ } else {
+ LOG.warn("Progress from unknown child task: "+taskid);
+ return false;
+ }
+ }
+
+ /**
+ * Called when the task dies before completion, and we want to report back
+ * diagnostic info
+ */
+ public synchronized void reportDiagnosticInfo(TaskAttemptID taskid, String info) throws IOException {
+ ensureAuthorizedJVM(taskid.getJobID());
+ internalReportDiagnosticInfo(taskid, info);
+ }
+
+ /**
+ * Same as reportDiagnosticInfo but does not authorize caller. This is used
+ * internally within MapReduce, whereas reportDiagnosticInfo may be called
+ * via RPC.
+ */
+ synchronized void internalReportDiagnosticInfo(TaskAttemptID taskid, String info) throws IOException {
+ TaskInProgress tip = tasks.get(taskid);
+ if (tip != null) {
+ tip.reportDiagnosticInfo(info);
+ } else {
+ LOG.warn("Error from unknown child task: "+taskid+". Ignored.");
+ }
+ }
+
+ public synchronized void reportNextRecordRange(TaskAttemptID taskid,
+ SortedRanges.Range range) throws IOException {
+ ensureAuthorizedJVM(taskid.getJobID());
+ TaskInProgress tip = tasks.get(taskid);
+ if (tip != null) {
+ tip.reportNextRecordRange(range);
+ } else {
+ LOG.warn("reportNextRecordRange from unknown child task: "+taskid+". " +
+ "Ignored.");
+ }
+ }
+
+ /** Child checking to see if we're alive. Normally does nothing. */
+ public synchronized boolean ping(TaskAttemptID taskid) throws IOException {
+ ensureAuthorizedJVM(taskid.getJobID());
+ return tasks.get(taskid) != null;
+ }
+
+ /**
+ * Task is reporting that it is in commit_pending
+ * and is waiting for the commit response
+ */
+ public synchronized void commitPending(TaskAttemptID taskid,
+ TaskStatus taskStatus)
+ throws IOException {
+ ensureAuthorizedJVM(taskid.getJobID());
+ LOG.info("Task " + taskid + " is in commit-pending," +"" +
+ " task state:" +taskStatus.getRunState());
+ statusUpdate(taskid, taskStatus);
+ reportTaskFinished(taskid, true);
+ }
+
+ /**
+ * Child checking whether it can commit
+ */
+ public synchronized boolean canCommit(TaskAttemptID taskid) {
+ return commitResponses.contains(taskid); //don't remove it now
+ }
+
+ /**
+ * The task is done.
+ */
+ public synchronized void done(TaskAttemptID taskid)
+ throws IOException {
+ ensureAuthorizedJVM(taskid.getJobID());
+ TaskInProgress tip = tasks.get(taskid);
+ commitResponses.remove(taskid);
+ if (tip != null) {
+ tip.reportDone();
+ } else {
+ LOG.warn("Unknown child task done: "+taskid+". Ignored.");
+ }
+ }
+
+
+ /**
+ * A reduce-task failed to shuffle the map-outputs. Kill the task.
+ */
+ public synchronized void shuffleError(TaskAttemptID taskId, String message)
+ throws IOException {
+ ensureAuthorizedJVM(taskId.getJobID());
+ LOG.fatal("Task: " + taskId + " - Killed due to Shuffle Failure: " + message);
+ TaskInProgress tip = runningTasks.get(taskId);
+ tip.reportDiagnosticInfo("Shuffle Error: " + message);
+ purgeTask(tip, true);
+ }
+
+ /**
+ * A child task had a local filesystem error. Kill the task.
+ */
+ public synchronized void fsError(TaskAttemptID taskId, String message)
+ throws IOException {
+ ensureAuthorizedJVM(taskId.getJobID());
+ internalFsError(taskId, message);
+ }
+
+ /**
+ * Version of fsError() that does not do authorization checks, called by
+ * the TaskRunner.
+ */
+ synchronized void internalFsError(TaskAttemptID taskId, String message)
+ throws IOException {
+ LOG.fatal("Task: " + taskId + " - Killed due to FSError: " + message);
+ TaskInProgress tip = runningTasks.get(taskId);
+ tip.reportDiagnosticInfo("FSError: " + message);
+ purgeTask(tip, true);
+ }
+
+ /**
+ * A child task had a fatal error. Kill the task.
+ */
+ public synchronized void fatalError(TaskAttemptID taskId, String msg)
+ throws IOException {
+ ensureAuthorizedJVM(taskId.getJobID());
+ LOG.fatal("Task: " + taskId + " - exited : " + msg);
+ TaskInProgress tip = runningTasks.get(taskId);
+ tip.reportDiagnosticInfo("Error: " + msg);
+ purgeTask(tip, true);
+ }
+
+ public synchronized MapTaskCompletionEventsUpdate getMapCompletionEvents(
+ JobID jobId, int fromEventId, int maxLocs, TaskAttemptID id)
+ throws IOException {
+ TaskCompletionEvent[] mapEvents = TaskCompletionEvent.EMPTY_ARRAY;
+ synchronized (shouldReset) {
+ if (shouldReset.remove(id)) {
+ return new MapTaskCompletionEventsUpdate(mapEvents, true);
+ }
+ }
+ RunningJob rjob;
+ synchronized (runningJobs) {
+ rjob = runningJobs.get(jobId);
+ if (rjob != null) {
+ synchronized (rjob) {
+ FetchStatus f = rjob.getFetchStatus();
+ if (f != null) {
+ mapEvents = f.getMapEvents(fromEventId, maxLocs);
+ }
+ }
+ }
+ }
+ return new MapTaskCompletionEventsUpdate(mapEvents, false);
+ }
+
+ /////////////////////////////////////////////////////
+ // Called by TaskTracker thread after task process ends
+ /////////////////////////////////////////////////////
+ /**
+ * The task is no longer running. It may not have completed successfully.
+ */
+ void reportTaskFinished(TaskAttemptID taskid, boolean commitPending) {
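+ // Look up the TIP while holding the tracker lock, then report outside the lock.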
+ TaskInProgress tip;
+ synchronized (this) {
+ tip = tasks.get(taskid);
+ }
+ if (tip != null) {
+ tip.reportTaskFinished(commitPending);
+ } else {
+ LOG.warn("Unknown child task finished: "+taskid+". Ignored.");
+ }
+ }
+
+
+ /**
+ * A completed map task's output has been lost.
+ */
+ public synchronized void mapOutputLost(TaskAttemptID taskid,
+ String errorMsg) throws IOException {
+ TaskInProgress tip = tasks.get(taskid);
+ if (tip != null) {
+ tip.mapOutputLost(errorMsg);
+ } else {
+ LOG.warn("Unknown child with bad map output: "+taskid+". Ignored.");
+ }
+ }
+
+ /**
+ * The data structure used while initializing a job.
+ */
+ static class RunningJob{
+ private JobID jobid;
+ private JobConf jobConf;
+ // keep this for later use
+ volatile Set<TaskInProgress> tasks;
+ boolean localized;
+ boolean keepJobFiles;
+ UserGroupInformation ugi;
+ FetchStatus f;
+ RunningJob(JobID jobid) {
+ this.jobid = jobid;
+ localized = false;
+ tasks = new HashSet<TaskInProgress>();
+ keepJobFiles = false;
+ }
+
+ JobID getJobID() {
+ return jobid;
+ }
+
+ UserGroupInformation getUGI() {
+ return ugi;
+ }
+
+ void setFetchStatus(FetchStatus f) {
+ this.f = f;
+ }
+
+ FetchStatus getFetchStatus() {
+ return f;
+ }
+
+ JobConf getJobConf() {
+ return jobConf;
+ }
+ }
+
+ /**
+ * Get the name for this task tracker.
+ * @return a string like "tracker_mymachine:50010"
+ */
+ String getName() {
+ return taskTrackerName;
+ }
+
+ private synchronized List<TaskStatus> cloneAndResetRunningTaskStatuses(
+ boolean sendCounters) {
+ List<TaskStatus> result = new ArrayList<TaskStatus>(runningTasks.size());
+ for(TaskInProgress tip: runningTasks.values()) {
+ TaskStatus status = tip.getStatus();
+ status.setIncludeAllCounters(sendCounters);
+ // send counters for finished or failed tasks and commit pending tasks
+ if (status.getRunState() != TaskStatus.State.RUNNING) {
+ status.setIncludeAllCounters(true);
+ }
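+ // Snapshot the status for this heartbeat, then clear it so the next
+ // cycle only carries new changes.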
+ result.add((TaskStatus)status.clone());
+ status.clearStatus();
+ }
+ return result;
+ }
+ /**
+ * Get the list of tasks that will be reported back to the
+ * job tracker in the next heartbeat cycle.
+ * @return a copy of the list of TaskStatus objects
+ */
+ synchronized List<TaskStatus> getRunningTaskStatuses() {
+ List<TaskStatus> result = new ArrayList<TaskStatus>(runningTasks.size());
+ for(TaskInProgress tip: runningTasks.values()) {
+ result.add(tip.getStatus());
+ }
+ return result;
+ }
+
+ /**
+ * Get the list of stored tasks on this task tracker.
+ * @return the list of TaskStatus objects for tasks that are known
+ * to this tracker but not currently running
+ */
+ synchronized List<TaskStatus> getNonRunningTasks() {
+ List<TaskStatus> result = new ArrayList<TaskStatus>(tasks.size());
+ for(Map.Entry<TaskAttemptID, TaskInProgress> task: tasks.entrySet()) {
+ if (!runningTasks.containsKey(task.getKey())) {
+ result.add(task.getValue().getStatus());
+ }
+ }
+ return result;
+ }
+
+
+ /**
+ * Get the list of tasks from running jobs on this task tracker.
+ * @return a copy of the list of TaskStatus objects
+ */
+ synchronized List<TaskStatus> getTasksFromRunningJobs() {
+ List<TaskStatus> result = new ArrayList<TaskStatus>(tasks.size());
+ for (Map.Entry <JobID, RunningJob> item : runningJobs.entrySet()) {
+ RunningJob rjob = item.getValue();
+ synchronized (rjob) {
+ for (TaskInProgress tip : rjob.tasks) {
+ result.add(tip.getStatus());
+ }
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Get the default job conf for this tracker.
+ */
+ JobConf getJobConf() {
+ return fConf;
+ }
+
+ /**
+ * Check if the given local directories
+ * (and parent directories, if necessary) can be created.
+ * @param localDirs where the new TaskTracker should keep its local files.
+ * @throws DiskErrorException if none of the local directories is writable
+ */
+ private static void checkLocalDirs(String[] localDirs)
+ throws DiskErrorException {
+ boolean writable = false;
+
+ if (localDirs != null) {
+ for (int i = 0; i < localDirs.length; i++) {
+ try {
+ DiskChecker.checkDir(new File(localDirs[i]));
+ writable = true;
+ } catch(DiskErrorException e) {
+ LOG.warn("Task Tracker local " + e.getMessage());
+ }
+ }
+ }
+
+ if (!writable)
+ throw new DiskErrorException(
+ "all local directories are not writable");
+ }
+
+ /**
+ * Is this task tracker idle?
+ * @return has this task tracker finished and cleaned up all of its tasks?
+ */
+ public synchronized boolean isIdle() {
+ return tasks.isEmpty() && tasksToCleanup.isEmpty();
+ }
+
+ /**
+ * Start the TaskTracker, point toward the indicated JobTracker
+ */
+ public static void main(String argv[]) throws Exception {
+ StringUtils.startupShutdownMessage(TaskTracker.class, argv, LOG);
+ if (argv.length != 0) {
+ System.out.println("usage: TaskTracker");
+ System.exit(-1);
+ }
+ try {
+ JobConf conf=new JobConf();
+ // enable the server to track time spent waiting on locks
+ ReflectionUtils.setContentionTracing
+ (conf.getBoolean(TT_CONTENTION_TRACKING, false));
+ new TaskTracker(conf).run();
+ } catch (Throwable e) {
+ LOG.error("Can not start task tracker because "+
+ StringUtils.stringifyException(e));
+ System.exit(-1);
+ }
+ }
+
+ /**
+ * This class is used in TaskTracker's Jetty to serve the map outputs
+ * to other nodes.
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public static class MapOutputServlet extends HttpServlet {
+ private static final long serialVersionUID = 1L;
+ private static final int MAX_BYTES_TO_READ = 64 * 1024;
+ @Override
+ public void doGet(HttpServletRequest request,
+ HttpServletResponse response
+ ) throws ServletException, IOException {
+ long start = System.currentTimeMillis();
+ String mapIds = request.getParameter("map");
+ String reduceId = request.getParameter("reduce");
+ String jobId = request.getParameter("job");
+
+ LOG.debug("Shuffle started for maps (mapIds=" + mapIds + ") to reduce " +
+ reduceId);
+
+ if (jobId == null) {
+ throw new IOException("job parameter is required");
+ }
+
+ if (mapIds == null || reduceId == null) {
+ throw new IOException("map and reduce parameters are required");
+ }
+
+ ServletContext context = getServletContext();
+ int reduce = Integer.parseInt(reduceId);
+ DataOutputStream outStream = null;
+
+ ShuffleServerMetrics shuffleMetrics =
+ (ShuffleServerMetrics) context.getAttribute("shuffleServerMetrics");
+ TaskTracker tracker =
+ (TaskTracker) context.getAttribute("task.tracker");
+ String exceptionStackRegex =
+ (String) context.getAttribute("exceptionStackRegex");
+ String exceptionMsgRegex =
+ (String) context.getAttribute("exceptionMsgRegex");
+
+ verifyRequest(request, response, tracker, jobId);
+
+ int numMaps = 0;
+ try {
+ shuffleMetrics.serverHandlerBusy();
+ response.setContentType("application/octet-stream");
+
+ outStream = new DataOutputStream(response.getOutputStream());
+ //use the same buffersize as used for reading the data from disk
+ response.setBufferSize(MAX_BYTES_TO_READ);
+ JobConf conf = (JobConf) context.getAttribute("conf");
+ LocalDirAllocator lDirAlloc =
+ (LocalDirAllocator)context.getAttribute("localDirAllocator");
+ FileSystem rfs = ((LocalFileSystem)
+ context.getAttribute("local.file.system")).getRaw();
+
+ // Split the map ids, send output for one map at a time
+ StringTokenizer itr = new StringTokenizer(mapIds, ",");
+ while(itr.hasMoreTokens()) {
+ String mapId = itr.nextToken();
+ ++numMaps;
+ sendMapFile(jobId, mapId, reduce, conf, outStream,
+ tracker, lDirAlloc, shuffleMetrics, rfs);
+ }
+ } catch (IOException ie) {
+ Log log = (Log) context.getAttribute("log");
+ String errorMsg = ("getMapOutputs(" + mapIds + "," + reduceId +
+ ") failed");
+ log.warn(errorMsg, ie);
+ checkException(ie, exceptionMsgRegex, exceptionStackRegex, shuffleMetrics);
+ response.sendError(HttpServletResponse.SC_GONE, errorMsg);
+ shuffleMetrics.failedOutput();
+ throw ie;
+ } finally {
+ shuffleMetrics.serverHandlerFree();
+ }
+ outStream.close();
+ shuffleMetrics.successOutput();
+ long timeElapsed = (System.currentTimeMillis()-start);
+ LOG.info("Shuffled " + numMaps
+ + "maps (mapIds=" + mapIds + ") to reduce "
+ + reduceId + " in " + timeElapsed + "s");
+
+ if (ClientTraceLog.isInfoEnabled()) {
+ ClientTraceLog.info(String.format(MR_CLIENTTRACE_FORMAT,
+ request.getLocalAddr() + ":" + request.getLocalPort(),
+ request.getRemoteAddr() + ":" + request.getRemotePort(),
+ numMaps, "MAPRED_SHUFFLE", reduceId,
+ timeElapsed));
+ }
+ }
+
+ protected void checkException(IOException ie, String exceptionMsgRegex,
+ String exceptionStackRegex, ShuffleServerMetrics shuffleMetrics) {
+ // Match the exception against the configured regular expressions.
+ // If both the message regex and the stack regex are set, both must
+ // match; otherwise only the one that is set has to match.
+ if (exceptionMsgRegex != null) {
+ String msg = ie.getMessage();
+ if (msg == null || !msg.matches(exceptionMsgRegex)) {
+ return;
+ }
+ }
+ if (exceptionStackRegex != null
+ && !checkStackException(ie, exceptionStackRegex)) {
+ return;
+ }
+ shuffleMetrics.exceptionsCaught();
+ }
+
+ private boolean checkStackException(IOException ie,
+ String exceptionStackRegex) {
+ StackTraceElement[] stack = ie.getStackTrace();
+
+ for (StackTraceElement elem : stack) {
+ String stacktrace = elem.toString();
+ if (stacktrace.matches(exceptionStackRegex)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private void sendMapFile(String jobId, String mapId,
+ int reduce,
+ Configuration conf,
+ DataOutputStream outStream,
+ TaskTracker tracker,
+ LocalDirAllocator lDirAlloc,
+ ShuffleServerMetrics shuffleMetrics,
+ FileSystem localfs
+ ) throws IOException {
+
+ LOG.debug("sendMapFile called for " + mapId + " to reduce " + reduce);
+
+ // true iff IOException was caused by attempt to access input
+ boolean isInputException = false;
+ FileInputStream mapOutputIn = null;
+ byte[] buffer = new byte[MAX_BYTES_TO_READ];
+ long totalRead = 0;
+
+ String userName = null;
+ String runAsUserName = null;
+ synchronized (tracker.runningJobs) {
+ RunningJob rjob = tracker.runningJobs.get(JobID.forName(jobId));
+ if (rjob == null) {
+ throw new IOException("Unknown job " + jobId + "!!");
+ }
+ userName = rjob.jobConf.getUser();
+ runAsUserName = tracker.getTaskController().getRunAsUser(rjob.jobConf);
+ }
+ // Index file
+ Path indexFileName =
+ lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir(
+ userName, jobId, mapId)
+ + "/file.out.index", conf);
+
+ // Map-output file
+ Path mapOutputFileName =
+ lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir(
+ userName, jobId, mapId)
+ + "/file.out", conf);
+
+ /**
+ * Read the index file to get the information about where the map-output
+ * for the given reducer is available.
+ */
+ IndexRecord info =
+ tracker.indexCache.getIndexInformation(mapId, reduce, indexFileName,
+ runAsUserName);
+
+ try {
+ /**
+ * Read the data from the single map-output file and
+ * send it to the reducer.
+ */
+ //open the map-output file
+ mapOutputIn = SecureIOUtils.openForRead(
+ new File(mapOutputFileName.toUri().getPath()), runAsUserName, null);
+ //seek to the correct offset for the reduce
+ IOUtils.skipFully(mapOutputIn, info.startOffset);
+
+ // write header for each map output
+ ShuffleHeader header = new ShuffleHeader(mapId, info.partLength,
+ info.rawLength, reduce);
+ header.write(outStream);
+
+ // read the map-output and stream it out
+ isInputException = true;
+ long rem = info.partLength;
+ if (rem == 0) {
+ throw new IOException("Illegal partLength of 0 for mapId " + mapId +
+ " to reduce " + reduce);
+ }
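+ // Stream the partition in MAX_BYTES_TO_READ chunks; a failure while
+ // writing to the reducer clears isInputException so it is not
+ // mistaken for lost map output.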
+ int len =
+ mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ));
+ long now = 0;
+ while (len >= 0) {
+ rem -= len;
+ try {
+ shuffleMetrics.outputBytes(len);
+
+ if (len > 0) {
+ outStream.write(buffer, 0, len);
+ } else {
+ LOG.info("Skipped zero-length read of map " + mapId +
+ " to reduce " + reduce);
+ }
+
+ } catch (IOException ie) {
+ isInputException = false;
+ throw ie;
+ }
+ totalRead += len;
+ if (rem == 0) {
+ break;
+ }
+ len =
+ mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ));
+ }
+ try {
+ outStream.flush();
+ } catch (IOException ie) {
+ isInputException = false;
+ throw ie;
+ }
+ } catch (IOException ie) {
+ String errorMsg = "error on sending map " + mapId + " to reduce " +
+ reduce;
+ if (isInputException) {
+ tracker.mapOutputLost(TaskAttemptID.forName(mapId), errorMsg +
+ StringUtils.stringifyException(ie));
+ }
+ throw new IOException(errorMsg, ie);
+ } finally {
+ if (mapOutputIn != null) {
+ try {
+ mapOutputIn.close();
+ } catch (IOException ioe) {
+ LOG.info("problem closing map output file", ioe);
+ }
+ }
+ }
+
+ LOG.info("Sent out " + totalRead + " bytes to reduce " + reduce +
+ " from map: " + mapId + " given " + info.partLength + "/" +
+ info.rawLength);
+ }
+
+ /**
+ * Verify that the request carries the correct hash of the URL,
+ * and add a header to the reply containing a hash of that hash.
+ * @param request the shuffle request from the fetcher
+ * @param response the response being sent back to the fetcher
+ * @param tracker the task tracker holding the job token secrets
+ * @param jobId the job whose token secret is used for verification
+ * @throws IOException if the fetcher cannot be authenticated
+ */
+ private void verifyRequest(HttpServletRequest request,
+ HttpServletResponse response, TaskTracker tracker, String jobId)
+ throws IOException {
+ SecretKey tokenSecret = tracker.getJobTokenSecretManager()
+ .retrieveTokenSecret(jobId);
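+ // Challenge-response: the fetcher sends a hash of the request URL keyed
+ // by the job token; we verify it, then reply with a hash of that hash
+ // so the fetcher can authenticate us in turn.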
+ // the message whose hash is verified
+ String enc_str = SecureShuffleUtils.buildMsgFrom(request);
+
+ // hash from the fetcher
+ String urlHashStr = request.getHeader(SecureShuffleUtils.HTTP_HEADER_URL_HASH);
+ if(urlHashStr == null) {
+ response.sendError(HttpServletResponse.SC_UNAUTHORIZED);
+ throw new IOException("fetcher cannot be authenticated");
+ }
+ int len = urlHashStr.length();
+ LOG.debug("verifying request. enc_str="+enc_str+"; hash=..."+
+ urlHashStr.substring(len-len/2, len-1)); // half of the hash for debug
+
+ // verify - throws exception
+ try {
+ SecureShuffleUtils.verifyReply(urlHashStr, enc_str, tokenSecret);
+ } catch (IOException ioe) {
+ response.sendError(HttpServletResponse.SC_UNAUTHORIZED);
+ throw ioe;
+ }
+
+ // verification passed - encode the reply
+ String reply = SecureShuffleUtils.generateHash(urlHashStr.getBytes(), tokenSecret);
+ response.addHeader(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, reply);
+
+ len = reply.length();
+ LOG.debug("Fetcher request verfied. enc_str="+enc_str+";reply="
+ +reply.substring(len-len/2, len-1));
+ }
+ }
+
+ // Get the full paths of the given subdirectory on all the local disks.
+ Path[] getLocalFiles(JobConf conf, String subdir) throws IOException{
+ String[] localDirs = conf.getLocalDirs();
+ Path[] paths = new Path[localDirs.length];
+ FileSystem localFs = FileSystem.getLocal(conf);
+ boolean subdirNeeded = (subdir != null) && (subdir.length() > 0);
+ for (int i = 0; i < localDirs.length; i++) {
+ paths[i] = (subdirNeeded) ? new Path(localDirs[i], subdir)
+ : new Path(localDirs[i]);
+ paths[i] = paths[i].makeQualified(localFs);
+ }
+ return paths;
+ }
+
+ FileSystem getLocalFileSystem(){
+ return localFs;
+ }
+
+ // only used by tests
+ void setLocalFileSystem(FileSystem fs){
+ localFs = fs;
+ }
+
+ int getMaxCurrentMapTasks() {
+ return maxMapSlots;
+ }
+
+ int getMaxCurrentReduceTasks() {
+ return maxReduceSlots;
+ }
+
+ //called from unit test
+ synchronized void setMaxMapSlots(int mapSlots) {
+ maxMapSlots = mapSlots;
+ }
+
+ //called from unit test
+ synchronized void setMaxReduceSlots(int reduceSlots) {
+ maxReduceSlots = reduceSlots;
+ }
+
+ /**
+ * Is the TaskMemoryManager Enabled on this system?
+ * @return true if enabled, false otherwise.
+ */
+ public boolean isTaskMemoryManagerEnabled() {
+ return taskMemoryManagerEnabled;
+ }
+
+ public TaskMemoryManagerThread getTaskMemoryManager() {
+ return taskMemoryManager;
+ }
+
+ /**
+ * Normalize negative configuration values to the disabled limit.
+ *
+ * @param val the configured value
+ * @return val, or JobConf.DISABLED_MEMORY_LIMIT if val is negative
+ */
+ private long normalizeMemoryConfigValue(long val) {
+ if (val < 0) {
+ val = JobConf.DISABLED_MEMORY_LIMIT;
+ }
+ return val;
+ }
+
+ /**
+ * Memory-related setup
+ */
+ private void initializeMemoryManagement() {
+
+ //handling @deprecated
+ if (fConf.get(MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY) != null) {
+ LOG.warn(
+ JobConf.deprecatedString(
+ MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY));
+ }
+
+ //handling @deprecated
+ if (fConf.get(MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY) != null) {
+ LOG.warn(
+ JobConf.deprecatedString(
+ MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY));
+ }
+
+ //handling @deprecated
+ if (fConf.get(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY) != null) {
+ LOG.warn(
+ JobConf.deprecatedString(
+ JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY));
+ }
+
+ //handling @deprecated
+ if (fConf.get(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY) != null) {
+ LOG.warn(
+ JobConf.deprecatedString(
+ JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY));
+ }
+
+ // Use TT_MEMORY_CALCULATOR_PLUGIN if it is configured.
+ Class<? extends MemoryCalculatorPlugin> clazz =
+ fConf.getClass(TT_MEMORY_CALCULATOR_PLUGIN,
+ null, MemoryCalculatorPlugin.class);
+ MemoryCalculatorPlugin memoryCalculatorPlugin = (clazz == null ?
+ null : MemoryCalculatorPlugin.getMemoryCalculatorPlugin(clazz, fConf));
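+ // Prefer an explicitly configured memory calculator; otherwise fall
+ // back to the shared resource calculator plugin.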
+ if (memoryCalculatorPlugin != null || resourceCalculatorPlugin != null) {
+ totalVirtualMemoryOnTT = (memoryCalculatorPlugin == null ?
+ resourceCalculatorPlugin.getVirtualMemorySize() :
+ memoryCalculatorPlugin.getVirtualMemorySize());
+ if (totalVirtualMemoryOnTT <= 0) {
+ LOG.warn("TaskTracker's totalVmem could not be calculated. "
+ + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT);
+ totalVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
+ }
+ totalPhysicalMemoryOnTT = (memoryCalculatorPlugin == null ?
+ resourceCalculatorPlugin.getPhysicalMemorySize() :
+ memoryCalculatorPlugin.getPhysicalMemorySize());
+ if (totalPhysicalMemoryOnTT <= 0) {
+ LOG.warn("TaskTracker's totalPmem could not be calculated. "
+ + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT);
+ totalPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
+ }
+ }
+
+ mapSlotMemorySizeOnTT =
+ fConf.getLong(
+ MAPMEMORY_MB,
+ JobConf.DISABLED_MEMORY_LIMIT);
+ reduceSlotSizeMemoryOnTT =
+ fConf.getLong(
+ REDUCEMEMORY_MB,
+ JobConf.DISABLED_MEMORY_LIMIT);
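+ // With per-slot limits of -1 (disabled) the total goes negative, which
+ // triggers the fallback to the deprecated keys below.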
+ totalMemoryAllottedForTasks =
+ maxMapSlots * mapSlotMemorySizeOnTT + maxReduceSlots
+ * reduceSlotSizeMemoryOnTT;
+ if (totalMemoryAllottedForTasks < 0) {
+ // fall back to the old keys which the administrator might still be
+ // using to configure memory monitoring on the TT
+ long memoryAllotedForSlot = fConf.normalizeMemoryConfigValue(
+ fConf.getLong(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY,
+ JobConf.DISABLED_MEMORY_LIMIT));
+ long limitVmPerTask = fConf.normalizeMemoryConfigValue(
+ fConf.getLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
+ JobConf.DISABLED_MEMORY_LIMIT));
+ if(memoryAllotedForSlot == JobConf.DISABLED_MEMORY_LIMIT) {
+ totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT;
+ } else {
+ if(memoryAllotedForSlot > limitVmPerTask) {
+ LOG.info("DefaultMaxVmPerTask is mis-configured. " +
+ "It shouldn't be greater than task limits");
+ totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT;
+ } else {
+ totalMemoryAllottedForTasks = (maxMapSlots +
+ maxReduceSlots) * (memoryAllotedForSlot/(1024 * 1024));
+ }
+ }
+ }
+ if (totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT) {
+ LOG.info("totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT."
+ + " Thrashing might happen.");
+ } else if (totalMemoryAllottedForTasks > totalVirtualMemoryOnTT) {
+ LOG.info("totalMemoryAllottedForTasks > totalVirtualMemoryOnTT."
+ + " Thrashing might happen.");
+ }
+
+ reservedPhysicalMemoryOnTT =
+ fConf.getLong(TTConfig.TT_RESERVED_PHYSCIALMEMORY_MB,
+ JobConf.DISABLED_MEMORY_LIMIT);
+ reservedPhysicalMemoryOnTT =
+ reservedPhysicalMemoryOnTT == JobConf.DISABLED_MEMORY_LIMIT ?
+ JobConf.DISABLED_MEMORY_LIMIT :
+ reservedPhysicalMemoryOnTT * 1024 * 1024; // normalize to bytes
+
+ // start the taskMemoryManager thread only if enabled
+ setTaskMemoryManagerEnabledFlag();
+ if (isTaskMemoryManagerEnabled()) {
+ taskMemoryManager = new TaskMemoryManagerThread(this);
+ taskMemoryManager.setDaemon(true);
+ taskMemoryManager.start();
+ }
+ }
+
+ void setTaskMemoryManagerEnabledFlag() {
+ if (!ProcfsBasedProcessTree.isAvailable()) {
+ LOG.info("ProcessTree implementation is missing on this system. "
+ + "TaskMemoryManager is disabled.");
+ taskMemoryManagerEnabled = false;
+ return;
+ }
+
+ if (reservedPhysicalMemoryOnTT == JobConf.DISABLED_MEMORY_LIMIT
+ && totalMemoryAllottedForTasks == JobConf.DISABLED_MEMORY_LIMIT) {
+ taskMemoryManagerEnabled = false;
+ LOG.warn("TaskTracker's totalMemoryAllottedForTasks is -1 and " +
+ "reserved physical memory is not configured. " +
+ "TaskMemoryManager is disabled.");
+ return;
+ }
+
+ taskMemoryManagerEnabled = true;
+ }
+
+ /**
+ * Clean up a task when the TaskMemoryManagerThread requests it.
+ * @param tid the task attempt to clean up
+ * @param wasFailure mark the task as failed or killed: 'failed' if true,
+ * 'killed' otherwise
+ * @param diagnosticMsg diagnostic information to report for the task
+ */
+ synchronized void cleanUpOverMemoryTask(TaskAttemptID tid, boolean wasFailure,
+ String diagnosticMsg) {
+ TaskInProgress tip = runningTasks.get(tid);
+ if (tip != null) {
+ tip.reportDiagnosticInfo(diagnosticMsg);
+ try {
+ purgeTask(tip, wasFailure); // Marking it as failed/killed.
+ } catch (IOException ioe) {
+ LOG.warn("Couldn't purge the task of " + tid + ". Error : " + ioe);
+ }
+ }
+ }
+
+ /**
+ * Wrapper method used by TaskTracker to check if {@link NodeHealthCheckerService}
+ * can be started
+ * @param conf configuration used to check if service can be started
+ * @return true if service can be started
+ */
+ private boolean shouldStartHealthMonitor(Configuration conf) {
+ return NodeHealthCheckerService.shouldRun(conf);
+ }
+
+ /**
+ * Wrapper method used to start {@link NodeHealthCheckerService} for
+ * Task Tracker
+ * @param conf Configuration used by the service.
+ */
+ private void startHealthMonitor(Configuration conf) {
+ healthChecker = new NodeHealthCheckerService(conf);
+ healthChecker.start();
+ }
+
+ TrackerDistributedCacheManager getTrackerDistributedCacheManager() {
+ return distributedCacheManager;
+ }
+
+ /**
+ * Download the job token file from the system FS and save it on the local fs.
+ * @param user the user owning the job
+ * @param jobId the job whose token file is localized
+ * @return the local file system path of the downloaded file.
+ * @throws IOException
+ */
+ private String localizeJobTokenFile(String user, JobID jobId)
+ throws IOException {
+ // check that the job token file is there
+ Path skPath = new Path(systemDirectory,
+ jobId.toString()+"/"+TokenCache.JOB_TOKEN_HDFS_FILE);
+
+ FileStatus status = null;
+ long jobTokenSize = -1;
+ status = systemFS.getFileStatus(skPath); //throws FileNotFoundException
+ jobTokenSize = status.getLen();
+
+ Path localJobTokenFile =
+ lDirAlloc.getLocalPathForWrite(getLocalJobTokenFile(user,
+ jobId.toString()), jobTokenSize, fConf);
+ String localJobTokenFileStr = localJobTokenFile.toUri().getPath();
+ LOG.debug("localizingJobTokenFile from sd="+skPath.toUri().getPath() +
+ " to " + localJobTokenFileStr);
+
+ // Download job_token
+ systemFS.copyToLocalFile(skPath, localJobTokenFile);
+ return localJobTokenFileStr;
+ }
+
+ JobACLsManager getJobACLsManager() {
+ return aclsManager.getJobACLsManager();
+ }
+
+ ACLsManager getACLsManager() {
+ return aclsManager;
+ }
+
+ synchronized TaskInProgress getRunningTask(TaskAttemptID tid) {
+ return runningTasks.get(tid);
+ }
+}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerAction.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerAction.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerAction.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerAction.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerManager.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerManager.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerManager.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerManager.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerMetricsInst.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerMetricsInst.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerMetricsInst.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerMetricsInst.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/UserLogCleaner.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/UserLogCleaner.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/UserLogCleaner.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/UserLogCleaner.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/jobcontrol/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/jobcontrol/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/jobcontrol/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/jobcontrol/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/join/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/join/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/join/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/join/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/lib/aggregate/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/db/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/lib/db/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/db/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/lib/db/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/lib/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/lib/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/lib/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/lib/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/pipes/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/pipes/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/pipes/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/pipes/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/tools/GetGroups.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/tools/GetGroups.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/tools/GetGroups.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/tools/GetGroups.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/tools/MRAdmin.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/tools/MRAdmin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/tools/MRAdmin.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/tools/MRAdmin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/tools/package-info.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapred/tools/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapred/tools/package-info.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapred/tools/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/FileSystemCounter.properties b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/FileSystemCounter.properties
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/FileSystemCounter.properties
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/FileSystemCounter.properties
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobCounter.properties b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/JobCounter.properties
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/JobCounter.properties
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/JobCounter.properties
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/TaskCounter.properties b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/TaskCounter.properties
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/TaskCounter.properties
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/TaskCounter.properties
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/TaskDistributedCacheManager.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/TaskDistributedCacheManager.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/TaskDistributedCacheManager.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/TaskDistributedCacheManager.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/aggregate/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/db/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.properties b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.properties
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.properties
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.properties
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/package.html b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/package.html
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/package.html
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/join/package.html
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.properties b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.properties
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.properties
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.properties
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JobTrackerJspHelper.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JobTrackerJspHelper.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JobTrackerJspHelper.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JobTrackerJspHelper.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/State.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/State.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/State.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/State.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/TaskTracker.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/TaskTracker.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/TaskTracker.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/TaskTracker.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/package-info.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/package-info.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/server/tasktracker/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/tools/package-info.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/tools/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/tools/package-info.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/tools/package-info.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/LinuxMemoryCalculatorPlugin.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/util/LinuxMemoryCalculatorPlugin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/LinuxMemoryCalculatorPlugin.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/util/LinuxMemoryCalculatorPlugin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/MRAsyncDiskService.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/util/MRAsyncDiskService.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/MRAsyncDiskService.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/util/MRAsyncDiskService.java
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/MemoryCalculatorPlugin.java b/hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/util/MemoryCalculatorPlugin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/mapreduce/util/MemoryCalculatorPlugin.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/mapreduce/util/MemoryCalculatorPlugin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/util/LinuxMemoryCalculatorPlugin.java b/hadoop-mapreduce/src/java/org/apache/hadoop/util/LinuxMemoryCalculatorPlugin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/util/LinuxMemoryCalculatorPlugin.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/util/LinuxMemoryCalculatorPlugin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/util/MemoryCalculatorPlugin.java b/hadoop-mapreduce/src/java/org/apache/hadoop/util/MemoryCalculatorPlugin.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/util/MemoryCalculatorPlugin.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/util/MemoryCalculatorPlugin.java
diff --git a/mapreduce/src/java/org/apache/hadoop/util/ProcessTree.java b/hadoop-mapreduce/src/java/org/apache/hadoop/util/ProcessTree.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/util/ProcessTree.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/util/ProcessTree.java
diff --git a/mapreduce/src/java/org/apache/hadoop/util/ProcfsBasedProcessTree.java b/hadoop-mapreduce/src/java/org/apache/hadoop/util/ProcfsBasedProcessTree.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/util/ProcfsBasedProcessTree.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/util/ProcfsBasedProcessTree.java
diff --git a/mapreduce/src/java/org/apache/hadoop/util/package-info.java b/hadoop-mapreduce/src/java/org/apache/hadoop/util/package-info.java
similarity index 100%
rename from mapreduce/src/java/org/apache/hadoop/util/package-info.java
rename to hadoop-mapreduce/src/java/org/apache/hadoop/util/package-info.java
diff --git a/mapreduce/src/java/overview.html b/hadoop-mapreduce/src/java/overview.html
similarity index 100%
rename from mapreduce/src/java/overview.html
rename to hadoop-mapreduce/src/java/overview.html
diff --git a/mapreduce/src/packages/deb/hadoop.control/conffile b/hadoop-mapreduce/src/packages/deb/hadoop.control/conffile
similarity index 100%
rename from mapreduce/src/packages/deb/hadoop.control/conffile
rename to hadoop-mapreduce/src/packages/deb/hadoop.control/conffile
diff --git a/mapreduce/src/packages/deb/hadoop.control/control b/hadoop-mapreduce/src/packages/deb/hadoop.control/control
similarity index 100%
rename from mapreduce/src/packages/deb/hadoop.control/control
rename to hadoop-mapreduce/src/packages/deb/hadoop.control/control
diff --git a/mapreduce/src/packages/deb/hadoop.control/postinst b/hadoop-mapreduce/src/packages/deb/hadoop.control/postinst
similarity index 100%
rename from mapreduce/src/packages/deb/hadoop.control/postinst
rename to hadoop-mapreduce/src/packages/deb/hadoop.control/postinst
diff --git a/mapreduce/src/packages/deb/hadoop.control/postrm b/hadoop-mapreduce/src/packages/deb/hadoop.control/postrm
similarity index 100%
rename from mapreduce/src/packages/deb/hadoop.control/postrm
rename to hadoop-mapreduce/src/packages/deb/hadoop.control/postrm
diff --git a/mapreduce/src/packages/deb/hadoop.control/preinst b/hadoop-mapreduce/src/packages/deb/hadoop.control/preinst
similarity index 100%
rename from mapreduce/src/packages/deb/hadoop.control/preinst
rename to hadoop-mapreduce/src/packages/deb/hadoop.control/preinst
diff --git a/mapreduce/src/packages/deb/hadoop.control/prerm b/hadoop-mapreduce/src/packages/deb/hadoop.control/prerm
similarity index 100%
rename from mapreduce/src/packages/deb/hadoop.control/prerm
rename to hadoop-mapreduce/src/packages/deb/hadoop.control/prerm
diff --git a/mapreduce/src/packages/deb/init.d/hadoop-jobtracker b/hadoop-mapreduce/src/packages/deb/init.d/hadoop-jobtracker
similarity index 100%
rename from mapreduce/src/packages/deb/init.d/hadoop-jobtracker
rename to hadoop-mapreduce/src/packages/deb/init.d/hadoop-jobtracker
diff --git a/mapreduce/src/packages/deb/init.d/hadoop-tasktracker b/hadoop-mapreduce/src/packages/deb/init.d/hadoop-tasktracker
similarity index 100%
rename from mapreduce/src/packages/deb/init.d/hadoop-tasktracker
rename to hadoop-mapreduce/src/packages/deb/init.d/hadoop-tasktracker
diff --git a/mapreduce/src/packages/rpm/init.d/hadoop-jobtracker b/hadoop-mapreduce/src/packages/rpm/init.d/hadoop-jobtracker
similarity index 100%
rename from mapreduce/src/packages/rpm/init.d/hadoop-jobtracker
rename to hadoop-mapreduce/src/packages/rpm/init.d/hadoop-jobtracker
diff --git a/mapreduce/src/packages/rpm/init.d/hadoop-tasktracker b/hadoop-mapreduce/src/packages/rpm/init.d/hadoop-tasktracker
similarity index 100%
rename from mapreduce/src/packages/rpm/init.d/hadoop-tasktracker
rename to hadoop-mapreduce/src/packages/rpm/init.d/hadoop-tasktracker
diff --git a/mapreduce/src/packages/rpm/spec/hadoop-mapred.spec b/hadoop-mapreduce/src/packages/rpm/spec/hadoop-mapred.spec
similarity index 100%
rename from mapreduce/src/packages/rpm/spec/hadoop-mapred.spec
rename to hadoop-mapreduce/src/packages/rpm/spec/hadoop-mapred.spec
diff --git a/mapreduce/src/packages/templates/conf/mapred-site.xml b/hadoop-mapreduce/src/packages/templates/conf/mapred-site.xml
similarity index 100%
rename from mapreduce/src/packages/templates/conf/mapred-site.xml
rename to hadoop-mapreduce/src/packages/templates/conf/mapred-site.xml
diff --git a/mapreduce/src/packages/update-mapred-env.sh b/hadoop-mapreduce/src/packages/update-mapred-env.sh
similarity index 100%
rename from mapreduce/src/packages/update-mapred-env.sh
rename to hadoop-mapreduce/src/packages/update-mapred-env.sh
diff --git a/mapreduce/src/test/all-tests b/hadoop-mapreduce/src/test/all-tests
similarity index 100%
rename from mapreduce/src/test/all-tests
rename to hadoop-mapreduce/src/test/all-tests
diff --git a/mapreduce/src/test/aop/build/aop.xml b/hadoop-mapreduce/src/test/aop/build/aop.xml
similarity index 100%
rename from mapreduce/src/test/aop/build/aop.xml
rename to hadoop-mapreduce/src/test/aop/build/aop.xml
diff --git a/mapreduce/src/test/aop/org/apache/hadoop/fi/FiConfig.java b/hadoop-mapreduce/src/test/aop/org/apache/hadoop/fi/FiConfig.java
similarity index 100%
rename from mapreduce/src/test/aop/org/apache/hadoop/fi/FiConfig.java
rename to hadoop-mapreduce/src/test/aop/org/apache/hadoop/fi/FiConfig.java
diff --git a/mapreduce/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java b/hadoop-mapreduce/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java
similarity index 100%
rename from mapreduce/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java
rename to hadoop-mapreduce/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java
diff --git a/mapreduce/src/test/checkstyle-noframes-sorted.xsl b/hadoop-mapreduce/src/test/checkstyle-noframes-sorted.xsl
similarity index 100%
rename from mapreduce/src/test/checkstyle-noframes-sorted.xsl
rename to hadoop-mapreduce/src/test/checkstyle-noframes-sorted.xsl
diff --git a/mapreduce/src/test/checkstyle.xml b/hadoop-mapreduce/src/test/checkstyle.xml
similarity index 100%
rename from mapreduce/src/test/checkstyle.xml
rename to hadoop-mapreduce/src/test/checkstyle.xml
diff --git a/mapreduce/src/test/commit-tests b/hadoop-mapreduce/src/test/commit-tests
similarity index 100%
rename from mapreduce/src/test/commit-tests
rename to hadoop-mapreduce/src/test/commit-tests
diff --git a/mapreduce/src/test/empty-file b/hadoop-mapreduce/src/test/empty-file
similarity index 100%
rename from mapreduce/src/test/empty-file
rename to hadoop-mapreduce/src/test/empty-file
diff --git a/mapreduce/src/test/fi-site.xml b/hadoop-mapreduce/src/test/fi-site.xml
similarity index 100%
rename from mapreduce/src/test/fi-site.xml
rename to hadoop-mapreduce/src/test/fi-site.xml
diff --git a/mapreduce/src/test/findbugsExcludeFile.xml b/hadoop-mapreduce/src/test/findbugsExcludeFile.xml
similarity index 100%
rename from mapreduce/src/test/findbugsExcludeFile.xml
rename to hadoop-mapreduce/src/test/findbugsExcludeFile.xml
diff --git a/mapreduce/src/test/hadoop-policy.xml b/hadoop-mapreduce/src/test/hadoop-policy.xml
similarity index 100%
rename from mapreduce/src/test/hadoop-policy.xml
rename to hadoop-mapreduce/src/test/hadoop-policy.xml
diff --git a/mapreduce/src/test/krb5.conf b/hadoop-mapreduce/src/test/krb5.conf
similarity index 100%
rename from mapreduce/src/test/krb5.conf
rename to hadoop-mapreduce/src/test/krb5.conf
diff --git a/mapreduce/src/test/log4j.properties b/hadoop-mapreduce/src/test/log4j.properties
similarity index 100%
rename from mapreduce/src/test/log4j.properties
rename to hadoop-mapreduce/src/test/log4j.properties
diff --git a/mapreduce/src/test/mapred-site.xml b/hadoop-mapreduce/src/test/mapred-site.xml
similarity index 100%
rename from mapreduce/src/test/mapred-site.xml
rename to hadoop-mapreduce/src/test/mapred-site.xml
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/cli/CLITestCmdMR.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/CLITestCmdMR.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/cli/CLITestCmdMR.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/CLITestCmdMR.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/cli/data60bytes b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/data60bytes
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/cli/data60bytes
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/data60bytes
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/cli/testMRConf.xml b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/testMRConf.xml
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/cli/testMRConf.xml
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/testMRConf.xml
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/cli/util/CLICommandArchive.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/util/CLICommandArchive.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/cli/util/CLICommandArchive.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/util/CLICommandArchive.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/cli/util/CLICommandMRAdmin.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/util/CLICommandMRAdmin.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/cli/util/CLICommandMRAdmin.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/cli/util/CLICommandMRAdmin.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/conf/TestNoDefaultsJobConf.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/conf/TestNoDefaultsJobConf.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/conf/TestNoDefaultsJobConf.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/conf/TestNoDefaultsJobConf.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestLongLong.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestLongLong.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestLongLong.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestLongLong.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestModular.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestModular.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestModular.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestModular.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestSummation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestSummation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestSummation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/pi/math/TestSummation.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/examples/terasort/TestTeraSort.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/terasort/TestTeraSort.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/examples/terasort/TestTeraSort.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/examples/terasort/TestTeraSort.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/AccumulatingReducer.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/AccumulatingReducer.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/AccumulatingReducer.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/AccumulatingReducer.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/DFSCIOTest.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/DFSCIOTest.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/DFSCIOTest.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/DFSCIOTest.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/DistributedFSCheck.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/DistributedFSCheck.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/DistributedFSCheck.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/DistributedFSCheck.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/IOMapperBase.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/IOMapperBase.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/IOMapperBase.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/IOMapperBase.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/JHLogAnalyzer.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/JHLogAnalyzer.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/JHLogAnalyzer.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/TestDFSIO.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/TestDFSIO.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/TestDFSIO.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/TestDFSIO.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/TestHarFileSystem.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/TestHarFileSystem.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/TestHarFileSystem.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/TestJHLA.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/TestJHLA.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/TestJHLA.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/TestJHLA.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/AppendOp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/AppendOp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/AppendOp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/AppendOp.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ArgumentParser.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ArgumentParser.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ArgumentParser.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ArgumentParser.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/BadFileException.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/BadFileException.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/BadFileException.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/BadFileException.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigExtractor.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigExtractor.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigExtractor.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigExtractor.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigMerger.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigMerger.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigMerger.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigMerger.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigOption.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigOption.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigOption.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ConfigOption.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Constants.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Constants.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Constants.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Constants.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/CreateOp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/CreateOp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/CreateOp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/CreateOp.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataHasher.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataHasher.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataHasher.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataHasher.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataVerifier.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataVerifier.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataVerifier.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataVerifier.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataWriter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataWriter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataWriter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DataWriter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DeleteOp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DeleteOp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DeleteOp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DeleteOp.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DummyInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DummyInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DummyInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/DummyInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Formatter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Formatter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Formatter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Formatter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Helper.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Helper.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Helper.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Helper.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ListOp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ListOp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ListOp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ListOp.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/MkdirOp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/MkdirOp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/MkdirOp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/MkdirOp.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ObserveableOp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ObserveableOp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ObserveableOp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ObserveableOp.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Operation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Operation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Operation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Operation.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationData.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationData.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationData.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationData.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationFactory.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationFactory.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationFactory.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationFactory.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationOutput.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationOutput.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationOutput.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationOutput.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationWeight.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationWeight.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationWeight.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/OperationWeight.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/PathFinder.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/PathFinder.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/PathFinder.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/PathFinder.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Range.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Range.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Range.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Range.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ReadOp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ReadOp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ReadOp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ReadOp.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/RenameOp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/RenameOp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/RenameOp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/RenameOp.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ReportWriter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ReportWriter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ReportWriter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/ReportWriter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/RouletteSelector.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/RouletteSelector.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/RouletteSelector.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/RouletteSelector.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SleepOp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SleepOp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SleepOp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SleepOp.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveMapper.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveMapper.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveMapper.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveMapper.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SlivePartitioner.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SlivePartitioner.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SlivePartitioner.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SlivePartitioner.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveReducer.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveReducer.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveReducer.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveReducer.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveTest.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveTest.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveTest.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/SliveTest.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/TestSlive.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/TestSlive.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/TestSlive.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/TestSlive.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Timer.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Timer.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Timer.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Timer.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/WeightSelector.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/WeightSelector.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/WeightSelector.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/WeightSelector.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Weights.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Weights.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Weights.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/fs/slive/Weights.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/hdfs/NNBench.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/hdfs/NNBench.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/hdfs/NNBench.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/hdfs/NNBench.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/hdfs/NNBenchWithoutMR.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/io/FileBench.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/io/FileBench.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/io/FileBench.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/io/FileBench.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/ipc/TestSocketFactory.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/ipc/TestSocketFactory.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/ipc/TestSocketFactory.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/ipc/TestSocketFactory.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/BigMapOutput.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/BigMapOutput.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/BigMapOutput.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/BigMapOutput.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyMemoryCalculatorPlugin.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyMemoryCalculatorPlugin.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyMemoryCalculatorPlugin.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyMemoryCalculatorPlugin.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyResourceCalculatorPlugin.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyResourceCalculatorPlugin.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyResourceCalculatorPlugin.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyResourceCalculatorPlugin.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyTaskTrackerInstrumentation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyTaskTrackerInstrumentation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyTaskTrackerInstrumentation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/DummyTaskTrackerInstrumentation.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/EmptyInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/EmptyInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/EmptyInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/EmptyInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/FakeObjectUtilities.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/FakeObjectUtilities.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/FakeObjectUtilities.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/FakeObjectUtilities.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/GenericMRLoadGenerator.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/GenericMRLoadGenerator.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/GenericMRLoadGenerator.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/GenericMRLoadGenerator.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/HadoopTestCase.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/HadoopTestCase.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/HadoopTestCase.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/HadoopTestCase.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/JobClientUnitTest.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/JobClientUnitTest.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/JobClientUnitTest.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/JobClientUnitTest.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/MRBench.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/MRBench.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/MRBench.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/MRBench.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/MRCaching.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/MRCaching.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/MRCaching.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/MRCaching.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/NotificationTestCase.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/NotificationTestCase.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/NotificationTestCase.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/NotificationTestCase.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/ReliabilityTest.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ReliabilityTest.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/ReliabilityTest.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ReliabilityTest.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestAdminOperationsProtocolWithServiceAuthorization.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestAdminOperationsProtocolWithServiceAuthorization.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestAdminOperationsProtocolWithServiceAuthorization.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestAdminOperationsProtocolWithServiceAuthorization.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestAuditLogger.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestAuditLogger.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestAuditLogger.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestAuditLogger.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestBadRecords.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestBadRecords.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestBadRecords.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestBadRecords.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterMRNotification.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterMRNotification.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterMRNotification.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterMRNotification.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterStatus.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterStatus.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterStatus.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestClusterStatus.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCollect.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCollect.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCollect.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCollect.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCombineFileInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCombineFileInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCombineFileInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCombineFileInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCombineOutputCollector.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCombineOutputCollector.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCombineOutputCollector.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCombineOutputCollector.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCommandLineJobSubmission.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCommandLineJobSubmission.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCommandLineJobSubmission.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCommandLineJobSubmission.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestComparators.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestComparators.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestComparators.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestComparators.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCompositeTaskTrackerInstrumentation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCompositeTaskTrackerInstrumentation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCompositeTaskTrackerInstrumentation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCompositeTaskTrackerInstrumentation.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCompressedEmptyMapOutputs.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCompressedEmptyMapOutputs.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCompressedEmptyMapOutputs.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCompressedEmptyMapOutputs.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestControlledMapReduceJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestControlledMapReduceJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestControlledMapReduceJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestControlledMapReduceJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCounters.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCounters.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCounters.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestCounters.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFieldSelection.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFieldSelection.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFieldSelection.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFieldSelection.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileInputFormatPathFilter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileInputFormatPathFilter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileInputFormatPathFilter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileInputFormatPathFilter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputCommitter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputCommitter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputCommitter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputCommitter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestGetSplitHosts.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestGetSplitHosts.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestGetSplitHosts.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestGetSplitHosts.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIFile.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIFile.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIFile.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIFile.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIFileStreams.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIFileStreams.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIFileStreams.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIFileStreams.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIndexCache.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIndexCache.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIndexCache.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIndexCache.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestInputPath.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestInputPath.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestInputPath.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestInputPath.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJavaSerialization.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJavaSerialization.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJavaSerialization.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJavaSerialization.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobCleanup.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobCleanup.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobCleanup.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobCleanup.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobClient.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobClient.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobClient.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobClient.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobConf.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobConf.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobConf.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobConf.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobCounters.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobCounters.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobCounters.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobCounters.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobDirCleanup.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobDirCleanup.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobDirCleanup.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobDirCleanup.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgressListener.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgressListener.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgressListener.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgressListener.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobKillAndFail.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobKillAndFail.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobKillAndFail.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobKillAndFail.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobName.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobName.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobName.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobName.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueClient.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueClient.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueClient.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueClient.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobStatusPersistency.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobStatusPersistency.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobStatusPersistency.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobStatusPersistency.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobSysDirWithDFS.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobSysDirWithDFS.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobSysDirWithDFS.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobSysDirWithDFS.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerInstrumentation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerInstrumentation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerInstrumentation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerInstrumentation.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerStart.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerStart.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerStart.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerStart.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerXmlJsp.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerXmlJsp.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerXmlJsp.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerXmlJsp.java
diff --git a/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java
new file mode 100644
index 0000000..490ee4cb
--- /dev/null
+++ b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java
@@ -0,0 +1,327 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Vector;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.mapred.JvmManager.JvmManagerForType;
+import org.apache.hadoop.mapred.JvmManager.JvmManagerForType.JvmRunner;
+import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
+import org.junit.After;
+import static org.junit.Assert.*;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestJvmManager {
+ static final Log LOG = LogFactory.getLog(TestJvmManager.class);
+
+ private static File TEST_DIR = new File(System.getProperty("test.build.data",
+ "/tmp"), TestJvmManager.class.getSimpleName());
+ private static int MAP_SLOTS = 10;
+ private static int REDUCE_SLOTS = 10;
+ private TaskTracker tt;
+ private JvmManager jvmManager;
+ private JobConf ttConf;
+
+ @Before
+ public void setUp() {
+ TEST_DIR.mkdirs();
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ FileUtil.fullyDelete(TEST_DIR);
+ }
+
+ public TestJvmManager() throws Exception {
+ tt = new TaskTracker();
+ ttConf = new JobConf();
+ ttConf.setLong(TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL, 2000);
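+ // allow 2 seconds between SIGTERM and SIGKILL so the tests below can
+ // observe the kill escalation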
+ tt.setConf(ttConf);
+ tt.setMaxMapSlots(MAP_SLOTS);
+ tt.setMaxReduceSlots(REDUCE_SLOTS);
+ tt.setTaskController(new DefaultTaskController());
+ jvmManager = new JvmManager(tt);
+ tt.setJvmManagerInstance(jvmManager);
+ }
+
+ // write a shell script to execute the command.
+ private File writeScript(String fileName, String cmd, File pidFile) throws IOException {
+ File script = new File(TEST_DIR, fileName);
+ FileOutputStream out = new FileOutputStream(script);
+ // write pid into a file
+ out.write(("echo $$ >" + pidFile.toString() + ";").getBytes());
+ // ignore SIGTERM
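+ // (signal 15), so a plain SIGTERM cannot stop the script and the kill
+ // logic has to escalate to SIGKILL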
+ out.write(("trap '' 15\n").getBytes());
+ // write the actual command itself.
+ out.write(cmd.getBytes());
+ out.close();
+ script.setExecutable(true);
+ return script;
+ }
+
+ /**
+ * Tests killing the jvm from JvmRunner and JvmManager simultaneously.
+ *
+ * Starts a process, which sleeps for 60 seconds, in a thread.
+ * Calls JvmRunner.kill() in a thread.
+ * Also calls JvmManager.taskKilled().
+ * Makes sure that the jvm is killed and that JvmManager can launch
+ * another task properly.
+ * @throws Exception
+ */
+ @Test
+ public void testJvmKill() throws Exception {
+ JvmManagerForType mapJvmManager = jvmManager
+ .getJvmManagerForType(TaskType.MAP);
+ // launch a jvm
+ JobConf taskConf = new JobConf(ttConf);
+ TaskAttemptID attemptID = new TaskAttemptID("test", 0, TaskType.MAP, 0, 0);
+ MapTask task = new MapTask(null, attemptID, 0, null, 1);
+ task.setConf(taskConf);
+ TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
+ File pidFile = new File(TEST_DIR, "pid");
+ final TaskRunner taskRunner = new MapTaskRunner(tip, tt, taskConf);
+ // launch a jvm which sleeps for 60 seconds
+ final Vector<String> vargs = new Vector<String>(2);
+ vargs.add(writeScript("SLEEP", "sleep 60\n", pidFile).getAbsolutePath());
+ final File workDir = new File(TEST_DIR, "work");
+ workDir.mkdir();
+ final File stdout = new File(TEST_DIR, "stdout");
+ final File stderr = new File(TEST_DIR, "stderr");
+
+ // launch the process and wait in a thread, till it finishes
+ Thread launcher = new Thread() {
+ public void run() {
+ try {
+ taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100,
+ workDir, null);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ return;
+ }
+ }
+ };
+ launcher.start();
+ // wait till the jvm is launched
+ // this loop waits for at most 1 second
+ for (int i = 0; i < 10; i++) {
+ if (pidFile.exists()) {
+ break;
+ }
+ UtilsForTests.waitFor(100);
+ }
+ // assert that the process is launched
+ assertTrue("pidFile is not present", pidFile.exists());
+
+ // imitate Child code.
+ // set pid in jvmManager
+ BufferedReader in = new BufferedReader(new FileReader(pidFile));
+ String pid = in.readLine();
+ in.close();
+ JVMId jvmid = mapJvmManager.runningTaskToJvm.get(taskRunner);
+ jvmManager.setPidToJvm(jvmid, pid);
+
+ // kill JvmRunner
+ final JvmRunner jvmRunner = mapJvmManager.jvmIdToRunner.get(jvmid);
+ Thread killer = new Thread() {
+ public void run() {
+ jvmRunner.kill();
+ }
+ };
+ killer.start();
+
+ // wait for a while so that the killer thread has started.
+ Thread.sleep(100);
+
+ // kill the jvm externally
+ taskRunner.kill();
+
+ assertTrue(jvmRunner.killed);
+
+ // launch another jvm and see it finishes properly
+ attemptID = new TaskAttemptID("test", 0, TaskType.MAP, 0, 1);
+ task = new MapTask(null, attemptID, 0, null, 1);
+ task.setConf(taskConf);
+ tip = tt.new TaskInProgress(task, taskConf);
+ TaskRunner taskRunner2 = new MapTaskRunner(tip, tt, taskConf);
+ // build dummy vargs to call ls
+ Vector<String> vargs2 = new Vector<String>(1);
+ vargs2.add(writeScript("LS", "ls", pidFile).getAbsolutePath());
+ File workDir2 = new File(TEST_DIR, "work2");
+ workDir2.mkdir();
+ File stdout2 = new File(TEST_DIR, "stdout2");
+ File stderr2 = new File(TEST_DIR, "stderr2");
+ taskRunner2.launchJvmAndWait(null, vargs2, stdout2, stderr2, 100, workDir2,
+ null);
+ // join all the threads
+ killer.join();
+ jvmRunner.join();
+ launcher.join();
+ }
+
+
+ /**
+ * Create a bunch of tasks and use a special hash map to detect
+ * racy access to the various internal data structures of JvmManager.
+ * (Regression test for MAPREDUCE-2224)
+ */
+ @Test
+ public void testForRaces() throws Exception {
+ JvmManagerForType mapJvmManager = jvmManager
+ .getJvmManagerForType(TaskType.MAP);
+
+ // Sub out the HashMaps for maps that will detect racy access.
+ mapJvmManager.jvmToRunningTask = new RaceHashMap<JVMId, TaskRunner>();
+ mapJvmManager.runningTaskToJvm = new RaceHashMap<TaskRunner, JVMId>();
+ mapJvmManager.jvmIdToRunner = new RaceHashMap<JVMId, JvmRunner>();
+
+ // Launch a bunch of JVMs, but only allow MAP_SLOTS to run at once.
+ final ExecutorService exec = Executors.newFixedThreadPool(MAP_SLOTS);
+ final AtomicReference<Throwable> failed =
+ new AtomicReference<Throwable>();
+
+ for (int i = 0; i < MAP_SLOTS*5; i++) {
+ JobConf taskConf = new JobConf(ttConf);
+ TaskAttemptID attemptID = new TaskAttemptID("test", 0, TaskType.MAP, i, 0);
+ Task task = new MapTask(null, attemptID, i, null, 1);
+ task.setConf(taskConf);
+ TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
+ File pidFile = new File(TEST_DIR, "pid_" + i);
+ final TaskRunner taskRunner = new MapTaskRunner(tip, tt, taskConf);
+ // launch a jvm which sleeps for 60 seconds
+ final Vector<String> vargs = new Vector<String>(2);
+ vargs.add(writeScript("script_" + i, "echo hi\n", pidFile).getAbsolutePath());
+ final File workDir = new File(TEST_DIR, "work_" + i);
+ workDir.mkdir();
+ final File stdout = new File(TEST_DIR, "stdout_" + i);
+ final File stderr = new File(TEST_DIR, "stderr_" + i);
+
+ // launch the process and wait in a thread, till it finishes
+ Runnable launcher = new Runnable() {
+ public void run() {
+ try {
+ taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100,
+ workDir, null);
+ } catch (Throwable t) {
+ failed.compareAndSet(null, t);
+ exec.shutdownNow();
+ return;
+ }
+ }
+ };
+ exec.submit(launcher);
+ }
+
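+ // wait for every launcher to finish; a detected race surfaces as the
+ // Throwable stored in 'failed'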
+ exec.shutdown();
+ exec.awaitTermination(3, TimeUnit.MINUTES);
+ if (failed.get() != null) {
+ throw new RuntimeException(failed.get());
+ }
+ }
+
+ /**
+ * HashMap which detects racy usage by sleeping during operations
+ * and checking that no other threads access the map while asleep.
+ */
+ static class RaceHashMap<K,V> extends HashMap<K,V> {
+ Object syncData = new Object();
+ RuntimeException userStack = null;
+ boolean raced = false;
+
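+ // Each map operation records the calling thread's stack, sleeps to widen
+ // the race window, then clears the record in done(); if a second thread
+ // enters while a record is still live, both stacks are logged and a
+ // "Raced" exception is thrown.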
+ private void checkInUse() {
+ synchronized (syncData) {
+ RuntimeException thisStack = new RuntimeException(Thread.currentThread().toString());
+
+ if (userStack != null && !raced) {
+ RuntimeException other = userStack;
+ raced = true;
+ LOG.fatal("Race between two threads.");
+ LOG.fatal("First", thisStack);
+ LOG.fatal("Second", other);
+ throw new RuntimeException("Raced");
+ } else {
+ userStack = thisStack;
+ }
+ }
+ }
+
+ private void sleepABit() {
+ try {
+ Thread.sleep(60);
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ private void done() {
+ synchronized (syncData) {
+ userStack = null;
+ }
+ }
+
+ @Override
+ public V get(Object key) {
+ checkInUse();
+ try {
+ sleepABit();
+ return super.get(key);
+ } finally {
+ done();
+ }
+ }
+
+ @Override
+ public boolean containsKey(Object key) {
+ checkInUse();
+ try {
+ sleepABit();
+ return super.containsKey(key);
+ } finally {
+ done();
+ }
+ }
+
+ @Override
+ public V put(K key, V val) {
+ checkInUse();
+ try {
+ sleepABit();
+ return super.put(key, val);
+ } finally {
+ done();
+ }
+ }
+ }
+
+}
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmReuse.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmReuse.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmReuse.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmReuse.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcesses.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcesses.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcesses.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcesses.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLazyOutput.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLazyOutput.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLazyOutput.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLazyOutput.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLimitTasksPerJobTaskScheduler.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLimitTasksPerJobTaskScheduler.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLimitTasksPerJobTaskScheduler.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLimitTasksPerJobTaskScheduler.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLocalMRNotification.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLocalMRNotification.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLocalMRNotification.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLocalMRNotification.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMRServerPorts.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMRServerPorts.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMRServerPorts.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMRServerPorts.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMRWithDistributedCache.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapOutputType.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapOutputType.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapOutputType.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapOutputType.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapRed.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapRed.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapRed.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapRed.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapredHeartbeat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapredHeartbeat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapredHeartbeat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapredHeartbeat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRBringup.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRBringup.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRBringup.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRBringup.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRChildTask.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRChildTask.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRChildTask.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRChildTask.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRClasspath.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRClasspath.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRClasspath.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRClasspath.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRLocalFS.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRLocalFS.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRLocalFS.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRLocalFS.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultiFileInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultiFileInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultiFileInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultiFileInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultiFileSplit.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultiFileSplit.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultiFileSplit.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultiFileSplit.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultipleLevelCaching.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultipleTextOutputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultipleTextOutputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultipleTextOutputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMultipleTextOutputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNetworkedJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNetworkedJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNetworkedJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNetworkedJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNodeHealthService.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNodeHealthService.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNodeHealthService.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNodeHealthService.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestParallelInitialization.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestParallelInitialization.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestParallelInitialization.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestParallelInitialization.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManager.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManager.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManager.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManager.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerRefresh.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerRefresh.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerRefresh.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerRefresh.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetch.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetch.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetch.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetch.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceTask.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceTask.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceTask.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReduceTask.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReporter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReporter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReporter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestReporter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestResourceEstimation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestResourceEstimation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestResourceEstimation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestResourceEstimation.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupTaskScheduling.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupTaskScheduling.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupTaskScheduling.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupTaskScheduling.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestShuffleExceptionCount.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestShuffleExceptionCount.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestShuffleExceptionCount.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestShuffleExceptionCount.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestShuffleJobToken.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestShuffleJobToken.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestShuffleJobToken.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestShuffleJobToken.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSortedRanges.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSortedRanges.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSortedRanges.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSortedRanges.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSpeculativeExecution.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSpeculativeExecution.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSpeculativeExecution.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSpeculativeExecution.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestStatisticsCollector.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestStatisticsCollector.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestStatisticsCollector.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestStatisticsCollector.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTTResourceReporting.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTTResourceReporting.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTTResourceReporting.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTTResourceReporting.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskChildOptsParsing.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskChildOptsParsing.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskChildOptsParsing.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskChildOptsParsing.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskCommit.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskCommit.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskCommit.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskCommit.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskFail.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskFail.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskFail.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskFail.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncher.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncher.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncher.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncher.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncherThreaded.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncherThreaded.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncherThreaded.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncherThreaded.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLimits.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLimits.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLimits.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLimits.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLogServlet.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLogServlet.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLogServlet.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskLogServlet.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskPerformanceSplits.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskPerformanceSplits.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskPerformanceSplits.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskPerformanceSplits.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskStatus.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskStatus.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskStatus.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskStatus.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerBlacklisting.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerBlacklisting.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerBlacklisting.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerBlacklisting.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerInstrumentation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerInstrumentation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerInstrumentation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerInstrumentation.java
diff --git a/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java
new file mode 100644
index 0000000..d1a2d05
--- /dev/null
+++ b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java
@@ -0,0 +1,1075 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.LinkedHashMap;
+import java.util.TreeMap;
+import java.util.jar.JarOutputStream;
+import java.util.zip.ZipEntry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
+import org.apache.hadoop.mapreduce.util.MRAsyncDiskService;
+
+import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.mapred.JvmManager.JvmEnv;
+import org.apache.hadoop.mapred.TaskController.JobInitializationContext;
+import org.apache.hadoop.mapred.TaskController.TaskControllerContext;
+import org.apache.hadoop.mapred.TaskTracker.RunningJob;
+import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
+import org.apache.hadoop.mapred.UtilsForTests.InlineCleanupQueue;
+
+import junit.framework.TestCase;
+
+/**
+ * Test to verify localization of a job and localization of a task on a
+ * TaskTracker.
+ *
+ */
+public class TestTaskTrackerLocalization extends TestCase {
+
+ private static File TEST_ROOT_DIR =
+ new File(System.getProperty("test.build.data", "/tmp"));
+ private File ROOT_MAPRED_LOCAL_DIR;
+ private File HADOOP_LOG_DIR;
+ private static File PERMISSION_SCRIPT_DIR;
+ private static File PERMISSION_SCRIPT_FILE;
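+ // Prints "<permissions>:<owner>:<group>" for the path passed as $1;
+ // the output is parsed by getFilePermissionAttrs() below.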
+ private static final String PERMISSION_SCRIPT_CONTENT = "ls -l -d $1 | " +
+ "awk '{print $1\":\"$3\":\"$4}'";
+
+ private int numLocalDirs = 6;
+ private static final Log LOG =
+ LogFactory.getLog(TestTaskTrackerLocalization.class);
+
+ protected TaskTracker tracker;
+ protected UserGroupInformation taskTrackerUGI;
+ protected TaskController taskController;
+ protected JobConf trackerFConf;
+ private JobConf localizedJobConf;
+ protected JobID jobId;
+ protected TaskAttemptID taskId;
+ protected Task task;
+ protected String[] localDirs;
+ protected static LocalDirAllocator lDirAlloc =
+ new LocalDirAllocator(MRConfig.LOCAL_DIR);
+ protected Path attemptWorkDir;
+ protected File[] attemptLogFiles;
+ protected JobConf localizedTaskConf;
+ private TaskInProgress tip;
+ private JobConf jobConf;
+ private File jobConfFile;
+
+ /**
+ * Dummy method in this base class. Derived classes override it to check
+ * whether a test can be run.
+ */
+ protected boolean canRun() {
+ return true;
+ }
+
+ @Override
+ protected void setUp()
+ throws Exception {
+ if (!canRun()) {
+ return;
+ }
+ TEST_ROOT_DIR =
+ new File(System.getProperty("test.build.data", "/tmp"), getClass()
+ .getSimpleName());
+ if (!TEST_ROOT_DIR.exists()) {
+ TEST_ROOT_DIR.mkdirs();
+ }
+
+ ROOT_MAPRED_LOCAL_DIR = new File(TEST_ROOT_DIR, "mapred/local");
+ ROOT_MAPRED_LOCAL_DIR.mkdirs();
+
+ HADOOP_LOG_DIR = new File(TEST_ROOT_DIR, "logs");
+ HADOOP_LOG_DIR.mkdir();
+ System.setProperty("hadoop.log.dir", HADOOP_LOG_DIR.getAbsolutePath());
+
+ trackerFConf = new JobConf();
+
+ trackerFConf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
+ localDirs = new String[numLocalDirs];
+ for (int i = 0; i < numLocalDirs; i++) {
+ localDirs[i] = new File(ROOT_MAPRED_LOCAL_DIR, "0_" + i).getPath();
+ }
+ trackerFConf.setStrings(MRConfig.LOCAL_DIR, localDirs);
+ trackerFConf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
+
+ // Create the job configuration file. Same as trackerConf in this test.
+ jobConf = new JobConf(trackerFConf);
+ // Set job view ACLs in conf so that the contents of jobACLsFile can be
+ // validated against this value. Include both users and groups.
+ String jobViewACLs = "user1,user2, group1,group2";
+ jobConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, jobViewACLs);
+
+ jobConf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 0);
+ jobConf.setUser(getJobOwner().getShortUserName());
+
+ String queue = "default";
+ // set job queue name in job conf
+ jobConf.setQueueName(queue);
+ // Set the queue admins ACL in the job conf, similar to what JobClient
+ // does, so that it goes into the job conf as well.
+ jobConf.set(toFullPropertyName(queue,
+ QueueACL.ADMINISTER_JOBS.getAclName()),
+ "qAdmin1,qAdmin2 qAdminsGroup1,qAdminsGroup2");
+
+ Job job = Job.getInstance(jobConf);
+ String jtIdentifier = "200907202331";
+ jobId = new JobID(jtIdentifier, 1);
+
+ // JobClient uploads the job jar to the file system and sets it in the
+ // jobConf.
+ uploadJobJar(job);
+
+ // JobClient uploads the jobConf to the file system.
+ jobConfFile = uploadJobConf(job.getConfiguration());
+
+ // create jobTokens file
+ uploadJobTokensFile();
+
+ taskTrackerUGI = UserGroupInformation.getCurrentUser();
+ startTracker();
+
+ // Set up the task to be localized
+ taskId =
+ new TaskAttemptID(jtIdentifier, jobId.getId(), TaskType.MAP, 1, 0);
+ createTask();
+
+ // mimic register task
+ // create the tip
+ tip = tracker.new TaskInProgress(task, trackerFConf);
+ }
+
+ private void startTracker() throws IOException {
+ // Set up the TaskTracker
+ tracker = new TaskTracker();
+ tracker.setConf(trackerFConf);
+ tracker.setTaskLogCleanupThread(new UserLogCleaner(trackerFConf));
+ initializeTracker();
+ }
+
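+ /**
+ * Wires up the parts of the TaskTracker that localization depends on:
+ * the local file system, async disk service, instrumentation, task
+ * controller and localizer.
+ */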
+ private void initializeTracker() throws IOException {
+ tracker.setIndexCache(new IndexCache(trackerFConf));
+ tracker.setTaskMemoryManagerEnabledFlag();
+
+ // for this test case the system FS is the local FS
+ tracker.systemFS = FileSystem.getLocal(trackerFConf);
+ tracker.setLocalFileSystem(tracker.systemFS);
+ tracker.systemDirectory = new Path(TEST_ROOT_DIR.getAbsolutePath());
+
+ tracker.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
+ tracker.runningJobs = new TreeMap<JobID, RunningJob>();
+ tracker.setAsyncDiskService(new MRAsyncDiskService(trackerFConf));
+ tracker.getAsyncDiskService().cleanupAllVolumes();
+
+ // Set up TaskTracker instrumentation
+ tracker.setTaskTrackerInstrumentation(
+ TaskTracker.createInstrumentation(tracker, trackerFConf));
+
+ // setup task controller
+ taskController = createTaskController();
+ taskController.setConf(trackerFConf);
+ taskController.setup();
+ tracker.setTaskController(taskController);
+ tracker.setLocalizer(new Localizer(tracker.getLocalFileSystem(), localDirs,
+ taskController));
+ }
+
+ protected TaskController createTaskController() {
+ return new DefaultTaskController();
+ }
+
+ private void createTask()
+ throws IOException {
+ task = new MapTask(jobConfFile.toURI().toString(), taskId, 1, null, 1);
+ task.setConf(jobConf); // Set conf. Set user name in particular.
+ task.setUser(jobConf.getUser());
+ }
+
+ protected UserGroupInformation getJobOwner() throws IOException {
+ return UserGroupInformation.getCurrentUser();
+ }
+
+ /**
+ * Static block setting up the permission script used by
+ * checkFilePermissions().
+ */
+ static {
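+ // Note: this block runs at class-load time, before setUp() re-points
+ // TEST_ROOT_DIR at a per-class subdirectory, so the script is created
+ // under the base test directory.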
+ PERMISSION_SCRIPT_DIR = new File(TEST_ROOT_DIR, "permission_script_dir");
+ PERMISSION_SCRIPT_FILE = new File(PERMISSION_SCRIPT_DIR, "getperms.sh");
+
+ if(PERMISSION_SCRIPT_FILE.exists()) {
+ PERMISSION_SCRIPT_FILE.delete();
+ }
+
+ if(PERMISSION_SCRIPT_DIR.exists()) {
+ PERMISSION_SCRIPT_DIR.delete();
+ }
+
+ PERMISSION_SCRIPT_DIR.mkdir();
+
+ try {
+ PrintWriter writer = new PrintWriter(PERMISSION_SCRIPT_FILE);
+ writer.write(PERMISSION_SCRIPT_CONTENT);
+ writer.close();
+ } catch (FileNotFoundException fe) {
+ fail();
+ }
+ PERMISSION_SCRIPT_FILE.setExecutable(true, true);
+ }
+
+ /**
+ * Creates a dummy job jar containing lib/lib1.jar and lib/lib2.jar and sets
+ * it on the job, mimicking JobClient's upload of the job jar.
+ *
+ * @param job the job whose jar is to be set
+ * @throws IOException
+ * @throws FileNotFoundException
+ */
+ private void uploadJobJar(Job job)
+ throws IOException,
+ FileNotFoundException {
+ File jobJarFile = new File(TEST_ROOT_DIR, "jobjar-on-dfs.jar");
+ JarOutputStream jstream =
+ new JarOutputStream(new FileOutputStream(jobJarFile));
+ ZipEntry ze = new ZipEntry("lib/lib1.jar");
+ jstream.putNextEntry(ze);
+ jstream.closeEntry();
+ ze = new ZipEntry("lib/lib2.jar");
+ jstream.putNextEntry(ze);
+ jstream.closeEntry();
+ jstream.finish();
+ jstream.close();
+ job.setJar(jobJarFile.toURI().toString());
+ }
+
+ /**
+ * Writes the given configuration to a local job.xml file, mimicking
+ * JobClient's upload of the job configuration.
+ *
+ * @param conf the configuration to write out
+ * @return the local file the configuration was written to
+ * @throws FileNotFoundException
+ * @throws IOException
+ */
+ protected File uploadJobConf(Configuration conf)
+ throws FileNotFoundException,
+ IOException {
+ File jobConfFile = new File(TEST_ROOT_DIR, "jobconf-on-dfs.xml");
+ FileOutputStream out = new FileOutputStream(jobConfFile);
+ conf.writeXml(out);
+ out.close();
+ return jobConfFile;
+ }
+
+ /**
+ * Creates a fake job-tokens file for the job; the credentials are empty
+ * since the keys are not needed for this test.
+ * @throws IOException
+ */
+ protected void uploadJobTokensFile() throws IOException {
+
+ File dir = new File(TEST_ROOT_DIR, jobId.toString());
+ if(!dir.exists())
+ assertTrue("faild to create dir="+dir.getAbsolutePath(), dir.mkdirs());
+ // writing empty file, we don't need the keys for this test
+ new Credentials().writeTokenStorageFile(new Path("file:///" + dir,
+ TokenCache.JOB_TOKEN_HDFS_FILE), new Configuration());
+ }
+
+ @Override
+ protected void tearDown()
+ throws Exception {
+ if (!canRun()) {
+ return;
+ }
+ FileUtil.fullyDelete(TEST_ROOT_DIR);
+ }
+
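+ /**
+ * Runs the permission script against the given path and returns the
+ * output split into {permissions, owner-user, owner-group}.
+ */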
+ protected static String[] getFilePermissionAttrs(String path)
+ throws IOException {
+ String[] command = {"bash",PERMISSION_SCRIPT_FILE.getAbsolutePath(), path};
+ String output=Shell.execCommand(command);
+ return output.split(":|\n");
+ }
+
+
+ /**
+ * Utility method to check the permissions of a given path. Requires the
+ * permission script to be set up before it can be called.
+ *
+ * @param path
+ * @param expectedPermissions
+ * @param expectedOwnerUser
+ * @param expectedOwnerGroup
+ * @throws IOException
+ */
+ static void checkFilePermissions(String path, String expectedPermissions,
+ String expectedOwnerUser, String expectedOwnerGroup)
+ throws IOException {
+ String[] attrs = getFilePermissionAttrs(path);
+ assertTrue("File attrs length is not 3 but " + attrs.length,
+ attrs.length == 3);
+ assertTrue("Path " + path + " has the permissions " + attrs[0]
+ + " instead of the expected " + expectedPermissions, attrs[0]
+ .equals(expectedPermissions));
+ assertTrue("Path " + path + " is user owned not by " + expectedOwnerUser
+ + " but by " + attrs[1], attrs[1].equals(expectedOwnerUser));
+ assertTrue("Path " + path + " is group owned not by " + expectedOwnerGroup
+ + " but by " + attrs[2], attrs[2].equals(expectedOwnerGroup));
+ }
+
+ /**
+ * Verify the task-controller's setup functionality
+ *
+ * @throws IOException
+ */
+ public void testTaskControllerSetup()
+ throws IOException {
+ if (!canRun()) {
+ return;
+ }
+ // Task-controller is already set up in the test's setup method. Now verify.
+ for (String localDir : localDirs) {
+
+ // Verify the local-dir itself.
+ File lDir = new File(localDir);
+ assertTrue("localDir " + lDir + " doesn't exists!", lDir.exists());
+ checkFilePermissions(lDir.getAbsolutePath(), "drwxr-xr-x", task
+ .getUser(), taskTrackerUGI.getGroupNames()[0]);
+ }
+
+ // Verify the permissions on the userlogs dir
+ File taskLog = TaskLog.getUserLogDir();
+ checkFilePermissions(taskLog.getAbsolutePath(), "drwxr-xr-x", task
+ .getUser(), taskTrackerUGI.getGroupNames()[0]);
+ }
+
+ /**
+ * Test the localization of a user on the TT.
+ *
+ * @throws IOException
+ */
+ public void testUserLocalization()
+ throws IOException {
+ if (!canRun()) {
+ return;
+ }
+ // /////////// The main method being tested
+ tracker.getLocalizer().initializeUserDirs(task.getUser());
+ // ///////////
+
+ // Check the directory structure and permissions
+ checkUserLocalization();
+
+ // For the sake of testing re-entrancy of initializeUserDirs(), we remove
+ // the user directories now and make sure that further calls to the method
+ // don't create directories any more.
+ for (String dir : localDirs) {
+ File userDir = new File(dir, TaskTracker.getUserDir(task.getUser()));
+ if (!FileUtil.fullyDelete(userDir)) {
+ throw new IOException("Uanble to delete " + userDir);
+ }
+ }
+
+ // Now call the method again.
+ tracker.getLocalizer().initializeUserDirs(task.getUser());
+
+ // Files should not be created now and so shouldn't be there anymore.
+ for (String dir : localDirs) {
+ File userDir = new File(dir, TaskTracker.getUserDir(task.getUser()));
+ assertFalse("Unexpectedly, user-dir " + userDir.getAbsolutePath()
+ + " exists!", userDir.exists());
+ }
+ }
+
+ protected void checkUserLocalization()
+ throws IOException {
+ for (String dir : localDirs) {
+
+ File localDir = new File(dir);
+ assertTrue(MRConfig.LOCAL_DIR + " " + localDir + " isn't created!",
+ localDir.exists());
+
+ File taskTrackerSubDir = new File(localDir, TaskTracker.SUBDIR);
+ assertTrue("taskTracker sub-dir in the local-dir " + localDir
+ + "is not created!", taskTrackerSubDir.exists());
+
+ File userDir = new File(taskTrackerSubDir, task.getUser());
+ assertTrue("user-dir in taskTrackerSubdir " + taskTrackerSubDir
+ + "is not created!", userDir.exists());
+ checkFilePermissions(userDir.getAbsolutePath(), "drwx------", task
+ .getUser(), taskTrackerUGI.getGroupNames()[0]);
+
+ File jobCache = new File(userDir, TaskTracker.JOBCACHE);
+ assertTrue("jobcache in the userDir " + userDir + " isn't created!",
+ jobCache.exists());
+ checkFilePermissions(jobCache.getAbsolutePath(), "drwx------", task
+ .getUser(), taskTrackerUGI.getGroupNames()[0]);
+
+ // Verify the distributed cache dir.
+ File distributedCacheDir =
+ new File(localDir, TaskTracker
+ .getPrivateDistributedCacheDir(task.getUser()));
+ assertTrue("distributed cache dir " + distributedCacheDir
+ + " doesn't exists!", distributedCacheDir.exists());
+ checkFilePermissions(distributedCacheDir.getAbsolutePath(),
+ "drwx------", task.getUser(), taskTrackerUGI.getGroupNames()[0]);
+ }
+ }
+
+ /**
+ * Test job localization on a TT. Tests localization of job.xml, job.jar and
+ * corresponding setting of configuration. Also test
+ * {@link TaskController#initializeJob(JobInitializationContext)}
+ *
+ * @throws IOException
+ */
+ public void testJobLocalization()
+ throws Exception {
+ if (!canRun()) {
+ return;
+ }
+ TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
+ localizedJobConf = rjob.getJobConf();
+
+ checkJobLocalization();
+ }
+
+ /**
+ * Test that, if the job log dir can't be created, the job will fail
+ * during localization rather than at the time when the task itself
+ * tries to write into it.
+ */
+ public void testJobLocalizationFailsIfLogDirUnwritable()
+ throws Exception {
+ if (!canRun()) {
+ return;
+ }
+
+ File logDir = TaskLog.getJobDir(jobId);
+ File logDirParent = logDir.getParentFile();
+
+ try {
+ assertTrue(logDirParent.mkdirs() || logDirParent.isDirectory());
+ FileUtil.fullyDelete(logDir);
+ FileUtil.chmod(logDirParent.getAbsolutePath(), "000");
+
+ tracker.localizeJob(tip);
+ fail("No exception");
+ } catch (IOException ioe) {
+ LOG.info("Got exception", ioe);
+ assertTrue(ioe.getMessage().contains("Could not create job user log"));
+ } finally {
+ // Put it back just to be safe
+ FileUtil.chmod(logDirParent.getAbsolutePath(), "755");
+ }
+ }
+
+ protected void checkJobLocalization()
+ throws IOException {
+ // Check the directory structure
+ for (String dir : localDirs) {
+
+ File localDir = new File(dir);
+ File taskTrackerSubDir = new File(localDir, TaskTracker.SUBDIR);
+ File userDir = new File(taskTrackerSubDir, task.getUser());
+ File jobCache = new File(userDir, TaskTracker.JOBCACHE);
+
+ File jobDir = new File(jobCache, jobId.toString());
+ assertTrue("job-dir in " + jobCache + " isn't created!", jobDir.exists());
+
+ // check the private permissions on the job directory
+ checkFilePermissions(jobDir.getAbsolutePath(), "drwx------", task
+ .getUser(), taskTrackerUGI.getGroupNames()[0]);
+ }
+
+ // check the localization of job.xml
+ assertTrue("job.xml is not localized on this TaskTracker!!", lDirAlloc
+ .getLocalPathToRead(TaskTracker.getLocalJobConfFile(task.getUser(),
+ jobId.toString()), trackerFConf) != null);
+
+ // check the localization of job.jar
+ Path jarFileLocalized =
+ lDirAlloc.getLocalPathToRead(TaskTracker.getJobJarFile(task.getUser(),
+ jobId.toString()), trackerFConf);
+ assertTrue("job.jar is not localized on this TaskTracker!!",
+ jarFileLocalized != null);
+ assertTrue("lib/lib1.jar is not unjarred on this TaskTracker!!", new File(
+ jarFileLocalized.getParent() + Path.SEPARATOR + "lib/lib1.jar")
+ .exists());
+ assertTrue("lib/lib2.jar is not unjarred on this TaskTracker!!", new File(
+ jarFileLocalized.getParent() + Path.SEPARATOR + "lib/lib2.jar")
+ .exists());
+
+ // check the creation of job work directory
+ assertTrue("job-work dir is not created on this TaskTracker!!", lDirAlloc
+ .getLocalPathToRead(TaskTracker.getJobWorkDir(task.getUser(), jobId
+ .toString()), trackerFConf) != null);
+
+ // Check the setting of mapreduce.job.local.dir and job.jar which will eventually be
+ // used by the user's task
+ boolean jobLocalDirFlag = false, mapredJarFlag = false;
+ String localizedJobLocalDir =
+ localizedJobConf.get(TaskTracker.JOB_LOCAL_DIR);
+ String localizedJobJar = localizedJobConf.getJar();
+ for (String localDir : localizedJobConf.getStrings(MRConfig.LOCAL_DIR)) {
+ if (localizedJobLocalDir.equals(localDir + Path.SEPARATOR
+ + TaskTracker.getJobWorkDir(task.getUser(), jobId.toString()))) {
+ jobLocalDirFlag = true;
+ }
+ if (localizedJobJar.equals(localDir + Path.SEPARATOR
+ + TaskTracker.getJobJarFile(task.getUser(), jobId.toString()))) {
+ mapredJarFlag = true;
+ }
+ }
+ assertTrue(TaskTracker.JOB_LOCAL_DIR
+ + " is not set properly to the target users directory : "
+ + localizedJobLocalDir, jobLocalDirFlag);
+ assertTrue(
+ "mapreduce.job.jar is not set properly to the target users directory : "
+ + localizedJobJar, mapredJarFlag);
+
+ // check job user-log directory permissions
+ File jobLogDir = TaskLog.getJobDir(jobId);
+ assertTrue("job log directory " + jobLogDir + " does not exist!", jobLogDir
+ .exists());
+ checkFilePermissions(jobLogDir.toString(), "drwx------", task.getUser(),
+ taskTrackerUGI.getGroupNames()[0]);
+
+ // Make sure that the job ACLs file job-acls.xml exists in job userlog dir
+ File jobACLsFile = new File(jobLogDir, TaskTracker.jobACLsFile);
+ assertTrue("JobACLsFile is missing in the job userlog dir " + jobLogDir,
+ jobACLsFile.exists());
+
+ // With default task controller, the job-acls.xml file is owned by TT and
+ // permissions are 700
+ checkFilePermissions(jobACLsFile.getAbsolutePath(), "-rw-------",
+ taskTrackerUGI.getShortUserName(), taskTrackerUGI.getGroupNames()[0]);
+
+ validateJobACLsFileContent();
+ }
+
+ // Validate the contents of jobACLsFile (i.e. user name, job-view-acl,
+ // queue name and queue-admins-acl).
+ protected void validateJobACLsFileContent() {
+ JobConf jobACLsConf = TaskLogServlet.getConfFromJobACLsFile(jobId);
+
+ assertTrue(jobACLsConf.get("user.name").equals(
+ localizedJobConf.getUser()));
+ assertTrue(jobACLsConf.get(MRJobConfig.JOB_ACL_VIEW_JOB).
+ equals(localizedJobConf.get(MRJobConfig.JOB_ACL_VIEW_JOB)));
+
+ String queue = localizedJobConf.getQueueName();
+ assertTrue(queue.equalsIgnoreCase(jobACLsConf.getQueueName()));
+
+ String qACLName = toFullPropertyName(queue,
+ QueueACL.ADMINISTER_JOBS.getAclName());
+ assertTrue(jobACLsConf.get(qACLName).equals(
+ localizedJobConf.get(qACLName)));
+ }
+
+ /**
+ * Test task localization on a TT.
+ *
+ * @throws IOException
+ */
+ public void testTaskLocalization()
+ throws Exception {
+ if (!canRun()) {
+ return;
+ }
+ TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
+ localizedJobConf = rjob.getJobConf();
+ initializeTask();
+
+ checkTaskLocalization();
+ }
+
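+ /**
+ * Localizes the task via TaskInProgress.localizeTask(), sets up the
+ * child task's configuration and log files, and finally initializes
+ * the task through the TaskController.
+ */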
+ private void initializeTask() throws IOException {
+ tip.setJobConf(localizedJobConf);
+
+ // ////////// The central method being tested
+ tip.localizeTask(task);
+ // //////////
+
+ // check the functionality of localizeTask
+ for (String dir : trackerFConf.getStrings(MRConfig.LOCAL_DIR)) {
+ File attemptDir =
+ new File(dir, TaskTracker.getLocalTaskDir(task.getUser(), jobId
+ .toString(), taskId.toString(), task.isTaskCleanupTask()));
+ assertTrue("attempt-dir " + attemptDir + " in localDir " + dir
+ + " is not created!!", attemptDir.exists());
+ }
+
+ attemptWorkDir =
+ lDirAlloc.getLocalPathToRead(TaskTracker.getTaskWorkDir(
+ task.getUser(), task.getJobID().toString(), task.getTaskID()
+ .toString(), task.isTaskCleanupTask()), trackerFConf);
+ assertTrue("atttempt work dir for " + taskId.toString()
+ + " is not created in any of the configured dirs!!",
+ attemptWorkDir != null);
+
+ TaskRunner runner = new MapTaskRunner(tip, tracker, tip.getJobConf());
+ tip.setTaskRunner(runner);
+
+ // /////// Few more methods being tested
+ runner.setupChildTaskConfiguration(lDirAlloc);
+ TaskRunner.createChildTmpDir(new File(attemptWorkDir.toUri().getPath()),
+ localizedJobConf);
+ attemptLogFiles = runner.prepareLogFiles(task.getTaskID(),
+ task.isTaskCleanupTask());
+
+ // Make sure the task-conf file is created
+ Path localTaskFile =
+ lDirAlloc.getLocalPathToRead(TaskTracker.getTaskConfFile(task
+ .getUser(), task.getJobID().toString(), task.getTaskID()
+ .toString(), task.isTaskCleanupTask()), trackerFConf);
+ assertTrue("Task conf file " + localTaskFile.toString()
+ + " is not created!!", new File(localTaskFile.toUri().getPath())
+ .exists());
+
+ // /////// One more method being tested. This happens in child space.
+ localizedTaskConf = new JobConf(localTaskFile);
+ TaskRunner.setupChildMapredLocalDirs(task, localizedTaskConf);
+ // ///////
+
+ // Initialize task via TaskController
+ TaskControllerContext taskContext =
+ new TaskController.TaskControllerContext();
+ taskContext.env =
+ new JvmEnv(null, null, null, null, -1, new File(localizedJobConf
+ .get(TaskTracker.JOB_LOCAL_DIR)), null, localizedJobConf);
+ taskContext.task = task;
+ // /////////// The method being tested
+ taskController.initializeTask(taskContext);
+ // ///////////
+ }
+
+ protected void checkTaskLocalization()
+ throws IOException {
+ // Make sure that the mapreduce.cluster.local.dir is sandboxed
+ for (String childMapredLocalDir : localizedTaskConf
+ .getStrings(MRConfig.LOCAL_DIR)) {
+ assertTrue("Local dir " + childMapredLocalDir + " is not sandboxed !!",
+ childMapredLocalDir.endsWith(TaskTracker.getLocalTaskDir(task
+ .getUser(), jobId.toString(), taskId.toString(),
+ task.isTaskCleanupTask())));
+ }
+
+ // Make sure task.getJobFile is changed and points to the right place.
+ assertTrue(task.getJobFile().endsWith(
+ TaskTracker.getTaskConfFile(task.getUser(), jobId.toString(), taskId
+ .toString(), task.isTaskCleanupTask())));
+
+ // Make sure that the tmp directories are created
+ assertTrue("tmp dir is not created in workDir "
+ + attemptWorkDir.toUri().getPath(), new File(attemptWorkDir.toUri()
+ .getPath(), "tmp").exists());
+
+ // Make sure that the logs are setup properly
+ File logDir = TaskLog.getAttemptDir(taskId, task.isTaskCleanupTask());
+ assertTrue("task's log dir " + logDir.toString() + " doesn't exist!",
+ logDir.exists());
+ checkFilePermissions(logDir.getAbsolutePath(), "drwx------", task
+ .getUser(), taskTrackerUGI.getGroupNames()[0]);
+
+ File expectedStdout = new File(logDir, TaskLog.LogName.STDOUT.toString());
+ assertTrue("stdout log file is improper. Expected : "
+ + expectedStdout.toString() + " Observed : "
+ + attemptLogFiles[0].toString(), expectedStdout.toString().equals(
+ attemptLogFiles[0].toString()));
+ File expectedStderr =
+ new File(logDir, Path.SEPARATOR + TaskLog.LogName.STDERR.toString());
+ assertTrue("stderr log file is improper. Expected : "
+ + expectedStderr.toString() + " Observed : "
+ + attemptLogFiles[1].toString(), expectedStderr.toString().equals(
+ attemptLogFiles[1].toString()));
+ }
+
+ /**
+ * Create a file in the given dir and set permissions r_xr_xr_x so that no
+ * one can delete it directly (without doing chmod first).
+ * Creates dir/subDir and dir/subDir/file.
+ */
+ static void createFileAndSetPermissions(JobConf jobConf, Path dir)
+ throws IOException {
+ Path subDir = new Path(dir, "subDir");
+ FileSystem fs = FileSystem.getLocal(jobConf);
+ fs.mkdirs(subDir);
+ Path p = new Path(subDir, "file");
+ java.io.DataOutputStream out = fs.create(p);
+ out.writeBytes("dummy input");
+ out.close();
+ // no write permission for subDir and subDir/file
+ try {
+ int ret = 0;
+ if((ret = FileUtil.chmod(subDir.toUri().getPath(), "a=rx", true)) != 0) {
+ LOG.warn("chmod failed for " + subDir + ";retVal=" + ret);
+ }
+ } catch(InterruptedException e) {
+ LOG.warn("Interrupted while doing chmod for " + subDir);
+ }
+ }
+
+ /**
+ * Validates the removal of $taskid and $taskid/work under mapred-local-dir
+ * in cases where those directories cannot be deleted without adding
+ * write permission to the newly created directories under $taskid and
+ * $taskid/work.
+ * Also see createFileAndSetPermissions() for details.
+ */
+ void validateRemoveTaskFiles(boolean needCleanup, boolean jvmReuse,
+ TaskInProgress tip) throws IOException {
+ // create files and set permissions 555. Verify if task controller sets
+ // the permissions for TT to delete the taskDir or workDir
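+ // If no cleanup is needed or the jvm is reused, only the work dir is
+ // removed; otherwise the whole task dir is removed.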
+ String dir = (!needCleanup || jvmReuse) ?
+ TaskTracker.getTaskWorkDir(task.getUser(), task.getJobID().toString(),
+ taskId.toString(), task.isTaskCleanupTask())
+ : TaskTracker.getLocalTaskDir(task.getUser(), task.getJobID().toString(),
+ taskId.toString(), task.isTaskCleanupTask());
+
+ Path[] paths = tracker.getLocalFiles(localizedJobConf, dir);
+ assertTrue("No paths found", paths.length > 0);
+ for (Path p : paths) {
+ if (tracker.getLocalFileSystem().exists(p)) {
+ createFileAndSetPermissions(localizedJobConf, p);
+ }
+ }
+
+ InlineCleanupQueue cleanupQueue = new InlineCleanupQueue();
+ tracker.setCleanupThread(cleanupQueue);
+
+ tip.removeTaskFiles(needCleanup, taskId);
+
+ if (jvmReuse) {
+ // work dir should still exist and cleanup queue should be empty
+ assertTrue("cleanup queue is not empty after removeTaskFiles() in case "
+ + "of jvm reuse.", cleanupQueue.isQueueEmpty());
+ boolean workDirExists = false;
+ for (Path p : paths) {
+ if (tracker.getLocalFileSystem().exists(p)) {
+ workDirExists = true;
+ }
+ }
+ assertTrue("work dir does not exist in case of jvm reuse", workDirExists);
+
+ // now try to delete the work dir and verify that there are no stale paths
+ JvmManager.deleteWorkDir(tracker, task);
+ }
+
+ assertTrue("Some task files are not deleted!! Number of stale paths is "
+ + cleanupQueue.stalePaths.size(), cleanupQueue.stalePaths.size() == 0);
+ }
+
+ /**
+ * Validates if task cleanup is done properly for a succeeded task
+ * @throws IOException
+ */
+ public void testTaskFilesRemoval()
+ throws Exception {
+ if (!canRun()) {
+ return;
+ }
+ testTaskFilesRemoval(false, false);// no needCleanup; no jvmReuse
+ }
+
+ /**
+ * Validates if task cleanup is done properly for a task that did not succeed
+ * @throws IOException
+ */
+ public void testFailedTaskFilesRemoval()
+ throws Exception {
+ if (!canRun()) {
+ return;
+ }
+ testTaskFilesRemoval(true, false);// needCleanup; no jvmReuse
+
+ // initialize a cleanupAttempt for the task.
+ task.setTaskCleanupTask();
+ // localize task cleanup attempt
+ initializeTask();
+ checkTaskLocalization();
+
+ // verify the cleanup of cleanup attempt.
+ testTaskFilesRemoval(true, false);// needCleanup; no jvmReuse
+ }
+
+ /**
+ * Validates if task cleanup is done properly for a succeeded task when
+ * jvm reuse is enabled
+ * @throws IOException
+ */
+ public void testTaskFilesRemovalWithJvmUse()
+ throws Exception {
+ if (!canRun()) {
+ return;
+ }
+ testTaskFilesRemoval(false, true);// no needCleanup; jvmReuse
+ }
+
+ /**
+ * Validates if task cleanup is done properly
+ */
+ private void testTaskFilesRemoval(boolean needCleanup, boolean jvmReuse)
+ throws Exception {
+ // Localize job and localize task.
+ TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
+ localizedJobConf = rjob.getJobConf();
+ if (jvmReuse) {
+ localizedJobConf.setNumTasksToExecutePerJvm(2);
+ }
+ initializeTask();
+
+ // TODO: Let the task run and create files.
+
+ // create files and set permissions 555. Verify if task controller sets
+ // the permissions for TT to delete the task dir or work dir properly
+ validateRemoveTaskFiles(needCleanup, jvmReuse, tip);
+ }
+
+ /**
+ * Test userlogs cleanup.
+ *
+ * @throws IOException
+ */
+ private void verifyUserLogsRemoval()
+ throws IOException {
+ // verify user logs cleanup
+ File jobUserLogDir = TaskLog.getJobDir(jobId);
+ // Logs should be there before cleanup.
+ assertTrue("Userlogs dir " + jobUserLogDir + " is not present as expected!!",
+ jobUserLogDir.exists());
+ tracker.purgeJob(new KillJobAction(jobId));
+ tracker.getTaskLogCleanupThread().processCompletedJobs();
+
+ // Logs should be gone after cleanup.
+ assertFalse("Userlogs dir " + jobUserLogDir + " is not deleted as expected!!",
+ jobUserLogDir.exists());
+ }
+
+ /**
+ * Test job cleanup by doing the following
+ * - create files with no write permissions to TT under job-work-dir
+ * - create files with no write permissions to TT under task-work-dir
+ */
+ public void testJobFilesRemoval() throws IOException, InterruptedException {
+ if (!canRun()) {
+ return;
+ }
+
+ LOG.info("Running testJobCleanup()");
+ // Localize job and localize task.
+ TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
+ localizedJobConf = rjob.getJobConf();
+
+ // Set an inline cleanup queue
+ InlineCleanupQueue cleanupQueue = new InlineCleanupQueue();
+ tracker.setCleanupThread(cleanupQueue);
+
+ // Create a file in job's work-dir with 555
+ String jobWorkDir =
+ TaskTracker.getJobWorkDir(task.getUser(), task.getJobID().toString());
+ Path[] jPaths = tracker.getLocalFiles(localizedJobConf, jobWorkDir);
+ assertTrue("No paths found for job", jPaths.length > 0);
+ for (Path p : jPaths) {
+ if (tracker.getLocalFileSystem().exists(p)) {
+ createFileAndSetPermissions(localizedJobConf, p);
+ }
+ }
+
+ // Initialize task dirs
+ tip.setJobConf(localizedJobConf);
+ tip.localizeTask(task);
+
+ // Create a file in task local dir with 555
+ // this is to simply test the case where the jvm reuse is enabled and some
+ // files in task-attempt-local-dir are left behind to be cleaned up when the
+ // job finishes.
+ String taskLocalDir =
+ TaskTracker.getLocalTaskDir(task.getUser(), task.getJobID().toString(),
+ task.getTaskID().toString(), false);
+ Path[] tPaths = tracker.getLocalFiles(localizedJobConf, taskLocalDir);
+ assertTrue("No paths found for task", tPaths.length > 0);
+ for (Path p : tPaths) {
+ if (tracker.getLocalFileSystem().exists(p)) {
+ createFileAndSetPermissions(localizedJobConf, p);
+ }
+ }
+
+ // remove the job work dir
+ tracker.removeJobFiles(task.getUser(), task.getJobID());
+
+ // check the task-local-dir
+ boolean tLocalDirExists = false;
+ for (Path p : tPaths) {
+ if (tracker.getLocalFileSystem().exists(p)) {
+ tLocalDirExists = true;
+ }
+ }
+ assertFalse("Task " + task.getTaskID() + " local dir exists after cleanup",
+ tLocalDirExists);
+
+ // Verify that the TaskTracker (via the task-controller) cleans up the dirs.
+ // check the job-work-dir
+ boolean jWorkDirExists = false;
+ for (Path p : jPaths) {
+ if (tracker.getLocalFileSystem().exists(p)) {
+ jWorkDirExists = true;
+ }
+ }
+ assertFalse("Job " + task.getJobID() + " work dir exists after cleanup",
+ jWorkDirExists);
+ // Test userlogs cleanup.
+ verifyUserLogsRemoval();
+
+ // Check that the empty $mapred.local.dir/taskTracker/$user dirs are still
+ // there.
+ for (String localDir : localDirs) {
+ Path userDir =
+ new Path(localDir, TaskTracker.getUserDir(task.getUser()));
+ assertTrue("User directory " + userDir + " is not present!!",
+ tracker.getLocalFileSystem().exists(userDir));
+ }
+ }
+
+ /**
+ * Tests TaskTracker restart after the localization.
+ *
+ * This tests the following steps:
+ *
+ * Localize Job, initialize a task.
+ * Then restart the Tracker.
+ * Launch a cleanup attempt for the task.
+ *
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public void testTrackerRestart() throws IOException, InterruptedException {
+ if (!canRun()) {
+ return;
+ }
+
+ // Localize job and localize task.
+ TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
+ localizedJobConf = rjob.getJobConf();
+ initializeTask();
+
+ // imitate tracker restart
+ startTracker();
+
+ // create a task cleanup attempt
+ createTask();
+ task.setTaskCleanupTask();
+ // register task
+ tip = tracker.new TaskInProgress(task, trackerFConf);
+
+ // localize the job again.
+ rjob = tracker.localizeJob(tip);
+ localizedJobConf = rjob.getJobConf();
+ checkJobLocalization();
+
+ // localize task cleanup attempt
+ initializeTask();
+ checkTaskLocalization();
+ }
+
+ /**
+ * Tests TaskTracker re-init after the localization.
+ *
+ * This tests the following steps:
+ *
+ * Localize Job, initialize a task.
+ * Then reinit the Tracker.
+ * Launch a cleanup attempt for the task.
+ *
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public void testTrackerReinit() throws IOException, InterruptedException {
+ if (!canRun()) {
+ return;
+ }
+
+ // Localize job and localize task.
+ TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
+ localizedJobConf = rjob.getJobConf();
+ initializeTask();
+
+ // imitate tracker reinit
+ initializeTracker();
+
+ // create a task cleanup attempt
+ createTask();
+ task.setTaskCleanupTask();
+ // register task
+ tip = tracker.new TaskInProgress(task, trackerFConf);
+
+ // localize the job again.
+ rjob = tracker.localizeJob(tip);
+ localizedJobConf = rjob.getJobConf();
+ checkJobLocalization();
+
+ // localize task cleanup attempt
+ initializeTask();
+ checkTaskLocalization();
+ }
+
+ /**
+ * Localizes a cleanup task and validates permissions.
+ *
+ * @throws InterruptedException
+ * @throws IOException
+ */
+ public void testCleanupTaskLocalization() throws IOException,
+ InterruptedException {
+ if (!canRun()) {
+ return;
+ }
+
+ task.setTaskCleanupTask();
+ // register task
+ tip = tracker.new TaskInProgress(task, trackerFConf);
+
+ // localize the job.
+ RunningJob rjob = tracker.localizeJob(tip);
+ localizedJobConf = rjob.getJobConf();
+ checkJobLocalization();
+
+ // localize task cleanup attempt
+ initializeTask();
+ checkTaskLocalization();
+ }
+}
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerSlotManagement.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerSlotManagement.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerSlotManagement.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerSlotManagement.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTextInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTextInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTextInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTextInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTextOutputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTextOutputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTextOutputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTextOutputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerBlacklistAcrossJobs.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerBlacklistAcrossJobs.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerBlacklistAcrossJobs.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerBlacklistAcrossJobs.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerReservation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerReservation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerReservation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTrackerReservation.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUserDefinedCounters.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUserDefinedCounters.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUserDefinedCounters.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUserDefinedCounters.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUtils.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUtils.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUtils.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestUtils.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestWebUIAuthorization.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestWebUIAuthorization.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestWebUIAuthorization.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestWebUIAuthorization.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestWritableJobConf.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestWritableJobConf.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestWritableJobConf.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestWritableJobConf.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/ThreadedMapBenchmark.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ThreadedMapBenchmark.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/ThreadedMapBenchmark.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/ThreadedMapBenchmark.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/WordCount.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/WordCount.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/WordCount.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/WordCount.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/concat.bz2 b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/concat.bz2
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/concat.bz2
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/concat.bz2
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/concat.gz b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/concat.gz
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/concat.gz
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/concat.gz
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/IncomparableKey.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/IncomparableKey.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/IncomparableKey.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/IncomparableKey.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestDatamerge.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestDatamerge.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestDatamerge.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestDatamerge.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestTupleWritable.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestTupleWritable.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestTupleWritable.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestTupleWritable.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestWrappedRecordReaderClassloader.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestWrappedRecordReaderClassloader.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestWrappedRecordReaderClassloader.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/join/TestWrappedRecordReaderClassloader.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestChain.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestChain.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestChain.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestChain.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestChainMapReduce.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestChainMapReduce.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestChainMapReduce.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestChainMapReduce.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestLineInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestLineInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestLineInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestLineInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultipleInputs.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultipleInputs.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultipleInputs.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultipleInputs.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultipleOutputs.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultipleOutputs.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultipleOutputs.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultipleOutputs.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultithreadedMapRunner.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultithreadedMapRunner.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultithreadedMapRunner.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/TestMultithreadedMapRunner.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/aggregate/TestAggregates.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/aggregate/TestAggregates.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/aggregate/TestAggregates.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/aggregate/TestAggregates.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/db/TestConstructQuery.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/db/TestConstructQuery.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/db/TestConstructQuery.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/lib/db/TestConstructQuery.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipes.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipes.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipes.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipes.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.jar b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.jar
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.jar
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.jar
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tar b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tar
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tar
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tar
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tar.gz b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tar.gz
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tar.gz
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tar.gz
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tgz b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tgz
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tgz
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.tgz
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.txt b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.txt
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.txt
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.txt
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.zip b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.zip
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.zip
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/test.zip
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.bz2 b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.bz2
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.bz2
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.bz2
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.gz b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.gz
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.gz
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.gz
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.bz2 b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.bz2
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.bz2
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.bz2
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.gz b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.gz
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.gz
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.gz
Binary files differ
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/testscript.txt b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testscript.txt
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/testscript.txt
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/testscript.txt
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/tools/TestGetGroups.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/tools/TestGetGroups.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapred/tools/TestGetGroups.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapred/tools/TestGetGroups.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/FailJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/FailJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/FailJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/FailJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestChild.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestChild.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestChild.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestChild.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestCounters.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestCounters.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestCounters.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestCounters.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestLocalRunner.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestLocalRunner.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestLocalRunner.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestLocalRunner.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMRJobClient.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMRJobClient.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMRJobClient.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMRJobClient.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapCollection.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapCollection.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapCollection.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapCollection.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduce.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduce.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduce.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduce.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLazyOutput.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLazyOutput.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLazyOutput.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLazyOutput.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestTaskContext.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestTaskContext.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestTaskContext.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestTaskContext.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestValueIterReset.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestValueIterReset.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestValueIterReset.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestValueIterReset.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestURIFragments.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestURIFragments.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestURIFragments.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestURIFragments.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/aggregate/AggregatorTests.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/aggregate/AggregatorTests.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/aggregate/AggregatorTests.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/aggregate/AggregatorTests.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/aggregate/TestMapReduceAggregates.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/aggregate/TestMapReduceAggregates.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/aggregate/TestMapReduceAggregates.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/aggregate/TestMapReduceAggregates.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestChainErrors.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestChainErrors.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestChainErrors.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestChainErrors.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestMapReduceChain.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestMapReduceChain.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestMapReduceChain.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestMapReduceChain.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestSingleElementChain.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestSingleElementChain.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestSingleElementChain.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/chain/TestSingleElementChain.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDataDrivenDBInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDataDrivenDBInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDataDrivenDBInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDataDrivenDBInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestIntegerSplitter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestIntegerSplitter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestIntegerSplitter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestIntegerSplitter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestTextSplitter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestTextSplitter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestTextSplitter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestTextSplitter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/fieldsel/TestMRFieldSelection.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/fieldsel/TestMRFieldSelection.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/fieldsel/TestMRFieldSelection.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/fieldsel/TestMRFieldSelection.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRKeyValueTextInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRKeyValueTextInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRKeyValueTextInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRKeyValueTextInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsBinaryInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsBinaryInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsBinaryInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsBinaryInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsTextInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsTextInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsTextInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsTextInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileInputFilter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileInputFilter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileInputFilter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileInputFilter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMultipleInputs.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMultipleInputs.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMultipleInputs.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestMultipleInputs.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestNLineInputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestNLineInputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestNLineInputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestNLineInputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinDatamerge.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinDatamerge.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinDatamerge.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinDatamerge.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinProperties.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinProperties.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinProperties.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinProperties.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinTupleWritable.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinTupleWritable.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinTupleWritable.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestJoinTupleWritable.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/map/TestMultithreadedMapper.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/map/TestMultithreadedMapper.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/map/TestMultithreadedMapper.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/map/TestMultithreadedMapper.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestJobOutputCommitter.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestJobOutputCommitter.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestJobOutputCommitter.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestJobOutputCommitter.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestMRMultipleOutputs.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestMRMultipleOutputs.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestMRMultipleOutputs.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestMRMultipleOutputs.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestMRSequenceFileAsBinaryOutputFormat.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestMRSequenceFileAsBinaryOutputFormat.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestMRSequenceFileAsBinaryOutputFormat.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/output/TestMRSequenceFileAsBinaryOutputFormat.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestBinaryPartitioner.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestBinaryPartitioner.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestBinaryPartitioner.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestBinaryPartitioner.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestInputSampler.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestInputSampler.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestInputSampler.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestInputSampler.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestKeyFieldHelper.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestKeyFieldHelper.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestKeyFieldHelper.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestKeyFieldHelper.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedComparator.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedComparator.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedComparator.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedComparator.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedPartitioner.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedPartitioner.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedPartitioner.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedPartitioner.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestTotalOrderPartitioner.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestTotalOrderPartitioner.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestTotalOrderPartitioner.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/lib/partition/TestTotalOrderPartitioner.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java
diff --git a/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
new file mode 100644
index 0000000..3a769e7
--- /dev/null
+++ b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
@@ -0,0 +1,431 @@
+/** Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.security;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.security.NoSuchAlgorithmException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.crypto.KeyGenerator;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
+import org.apache.hadoop.hdfs.HftpFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Master;
+import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.SleepJob;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ToolRunner;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
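+/**
+ * Tests that secret keys and delegation tokens placed in the TokenCache are
+ * visible to tasks in both distributed and local job runs, and that tokens
+ * can be obtained for HDFS, Hftp and ViewFS paths.
+ */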
+public class TestTokenCache {
+ private static final int NUM_OF_KEYS = 10;
+
+ // sleep mapper variant that additionally checks the TokenCache contents
+ static class MySleepMapper extends SleepJob.SleepMapper {
+ /**
+ * Attempts to access the TokenCache as a task would, verifying that the
+ * tokens and secret keys set up by the client are present.
+ */
+ @Override
+ public void map(IntWritable key, IntWritable value, Context context)
+ throws IOException, InterruptedException {
+ // get token storage and a key
+ Credentials ts = context.getCredentials();
+ byte[] key1 = ts.getSecretKey(new Text("alias1"));
+ Collection<Token<? extends TokenIdentifier>> dts = ts.getAllTokens();
+ int dts_size = 0;
+ if (dts != null)
+ dts_size = dts.size();
+
+ if (dts_size != 2) { // one job token and one delegation token
+ throw new RuntimeException("tokens are not available"); // fail the test
+ }
+
+ if (key1 == null || ts.numberOfSecretKeys() != NUM_OF_KEYS) {
+ throw new RuntimeException("secret keys are not available"); // fail the test
+ }
+ super.map(key, value, context);
+ }
+ }
+
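+ // SleepJob variant that plugs in MySleepMapper and, since security is
+ // disabled in this test setup, populates delegation tokens explicitly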
+ class MySleepJob extends SleepJob {
+ @Override
+ public Job createJob(int numMapper, int numReducer,
+ long mapSleepTime, int mapSleepCount,
+ long reduceSleepTime, int reduceSleepCount)
+ throws IOException {
+ Job job = super.createJob(numMapper, numReducer,
+ mapSleepTime, mapSleepCount,
+ reduceSleepTime, reduceSleepCount);
+
+ job.setMapperClass(MySleepMapper.class);
+ //Populate tokens here because security is disabled.
+ populateTokens(job);
+ return job;
+ }
+
+ private void populateTokens(Job job) {
+ // Credentials in the job will not have delegation tokens
+ // because security is disabled. Fetch delegation tokens
+ // and populate the credential in the job.
+ try {
+ Credentials ts = job.getCredentials();
+ Path p1 = new Path("file1");
+ p1 = p1.getFileSystem(job.getConfiguration()).makeQualified(p1);
+ Credentials cred = new Credentials();
+ TokenCache.obtainTokensForNamenodesInternal(cred, new Path[] { p1 },
+ job.getConfiguration());
+ for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
+ ts.addToken(new Text("Hdfs"), t);
+ }
+ } catch (IOException e) {
+ Assert.fail("Exception " + e);
+ }
+ }
+ }
+
+ private static MiniMRCluster mrCluster;
+ private static MiniDFSCluster dfsCluster;
+ private static final Path TEST_DIR =
+ new Path(System.getProperty("test.build.data","/tmp"), "sleepTest");
+ private static final Path tokenFileName = new Path(TEST_DIR, "tokenFile.json");
+ private static int numSlaves = 1;
+ private static JobConf jConf;
+ private static ObjectMapper mapper = new ObjectMapper();
+ private static Path p1;
+ private static Path p2;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+
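+ // bring up single-node DFS and MR clusters, write the JSON secret-key
+ // file, and start the delegation-token secret manager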
+ Configuration conf = new Configuration();
+ conf.set("hadoop.security.auth_to_local", "RULE:[2:$1]");
+ dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
+ jConf = new JobConf(conf);
+ mrCluster = new MiniMRCluster(0, 0, numSlaves,
+ dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null,
+ jConf);
+
+ createTokenFileJson();
+ verifySecretKeysInJSONFile();
+ NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
+ FileSystem fs = dfsCluster.getFileSystem();
+
+ p1 = new Path("file1");
+ p2 = new Path("file2");
+
+ p1 = fs.makeQualified(p1);
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ if(mrCluster != null)
+ mrCluster.shutdown();
+ mrCluster = null;
+ if(dfsCluster != null)
+ dfsCluster.shutdown();
+ dfsCluster = null;
+ }
+
+ // create a JSON file and put some keys into it
+ private static void createTokenFileJson() throws IOException {
+ Map<String, String> map = new HashMap<String, String>();
+
+ try {
+ KeyGenerator kg = KeyGenerator.getInstance("HmacSHA1");
+ for (int i = 0; i < NUM_OF_KEYS; i++) {
+ SecretKeySpec key = (SecretKeySpec) kg.generateKey();
+ byte[] enc_key = key.getEncoded();
+ map.put("alias" + i, new String(Base64.encodeBase64(enc_key)));
+ }
+ } catch (NoSuchAlgorithmException e) {
+ throw new IOException(e);
+ }
+
+ try {
+ File p = new File(tokenFileName.getParent().toString());
+ p.mkdirs();
+ // convert to JSON and save to the file
+ mapper.writeValue(new File(tokenFileName.toString()), map);
+
+ } catch (Exception e) {
+ System.out.println("failed with :" + e.getLocalizedMessage());
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private static void verifySecretKeysInJSONFile() throws IOException {
+ Map<String, String> map;
+ map = mapper.readValue(new File(tokenFileName.toString()), Map.class);
+ assertEquals("didn't read JSON correctly", map.size(), NUM_OF_KEYS);
+ }
+
+ /**
+ * Runs a distributed job and verifies that the TokenCache is available.
+ * @throws IOException
+ */
+ @Test
+ public void testTokenCache() throws IOException {
+
+ System.out.println("running dist job");
+
+ // make sure JT starts
+ jConf = mrCluster.createJobConf();
+
+ // provide namenode names for the job to get delegation tokens for
+ String nnUri = dfsCluster.getURI(0).toString();
+ jConf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
+ // job tracker principal id
+ jConf.set(JTConfig.JT_USER_NAME, "jt_id/foo@BAR");
+
+ // using argument to pass the file name
+ String[] args = {
+ "-tokenCacheFile", tokenFileName.toString(),
+ "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
+ };
+
+ int res = -1;
+ try {
+ res = ToolRunner.run(jConf, new MySleepJob(), args);
+ } catch (Exception e) {
+ System.out.println("Job failed with" + e.getLocalizedMessage());
+ e.printStackTrace(System.out);
+ fail("Job failed");
+ }
+ assertEquals("dist job res is not 0", res, 0);
+ }
+
+ /**
+ * Runs a local job and verifies that the TokenCache is available.
+ * @throws NoSuchAlgorithmException
+ * @throws IOException
+ */
+ @Test
+ public void testLocalJobTokenCache() throws NoSuchAlgorithmException, IOException {
+
+ System.out.println("running local job");
+ // this is a local job
+ String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
+ jConf.set("mapreduce.job.credentials.json", tokenFileName.toString());
+
+ int res = -1;
+ try {
+ res = ToolRunner.run(jConf, new MySleepJob(), args);
+ } catch (Exception e) {
+ System.out.println("Job failed with" + e.getLocalizedMessage());
+ e.printStackTrace(System.out);
+ fail("local Job failed");
+ }
+ assertEquals("local job res is not 0", res, 0);
+ }
+
+ @Test
+ public void testGetTokensForNamenodes() throws IOException {
+
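+ // both paths live on the same namenode, so a single token is expected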
+ Credentials credentials = new Credentials();
+ TokenCache.obtainTokensForNamenodesInternal(credentials, new Path[] { p1,
+ p2 }, jConf);
+
+ // the token is keyed by the namenode's hostname:port
+ String fs_addr =
+ SecurityUtil.buildDTServiceName(p1.toUri(), NameNode.DEFAULT_PORT);
+ Token<DelegationTokenIdentifier> nnt = TokenCache.getDelegationToken(
+ credentials, fs_addr);
+ System.out.println("dt for " + p1 + "(" + fs_addr + ")" + " = " + nnt);
+ assertNotNull("Token for nn is null", nnt);
+
+ // verify the size
+ Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
+ assertEquals("number of tokens is not 1", 1, tns.size());
+
+ boolean found = false;
+ for (Token<? extends TokenIdentifier> t : tns) {
+ if (t.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND) &&
+ t.getService().equals(new Text(fs_addr))) {
+ found = true;
+ }
+ }
+ assertTrue("didn't find token for " + p1, found);
+ }
+
+ @Test
+ public void testGetTokensForHftpFS() throws IOException, URISyntaxException {
+ HftpFileSystem hfs = mock(HftpFileSystem.class);
+
+ DelegationTokenSecretManager dtSecretManager =
+ NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem());
+ String renewer = "renewer";
+ jConf.set(JTConfig.JT_USER_NAME,renewer);
+ DelegationTokenIdentifier dtId =
+ new DelegationTokenIdentifier(new Text("user"), new Text(renewer), null);
+ final Token<DelegationTokenIdentifier> t =
+ new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
+
+ final URI uri = new URI("hftp://host:2222/file1");
+ final String fs_addr =
+ SecurityUtil.buildDTServiceName(uri, NameNode.DEFAULT_PORT);
+ t.setService(new Text(fs_addr));
+
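+ // stub the mocked HftpFileSystem so TokenCache sees a filesystem that
+ // reports our URI/service name and hands back the prepared token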
+ //when(hfs.getUri()).thenReturn(uri);
+ Mockito.doAnswer(new Answer<URI>(){
+ @Override
+ public URI answer(InvocationOnMock invocation)
+ throws Throwable {
+ return uri;
+ }}).when(hfs).getUri();
+
+ //when(hfs.getDelegationToken()).thenReturn((Token<? extends TokenIdentifier>) t);
+ Mockito.doAnswer(new Answer<Token<DelegationTokenIdentifier>>(){
+ @Override
+ public Token<DelegationTokenIdentifier> answer(InvocationOnMock invocation)
+ throws Throwable {
+ return t;
+ }}).when(hfs).getDelegationToken(renewer);
+
+ //when(hfs.getDelegationTokens()).thenReturn((Token<? extends TokenIdentifier>) t);
+ Mockito.doAnswer(new Answer<List<Token<DelegationTokenIdentifier>>>(){
+ @Override
+ public List<Token<DelegationTokenIdentifier>> answer(InvocationOnMock invocation)
+ throws Throwable {
+ return Collections.singletonList(t);
+ }}).when(hfs).getDelegationTokens(renewer);
+
+ //when(hfs.getCanonicalServiceName()).thenReturn(fs_addr);
+ Mockito.doAnswer(new Answer<String>(){
+ @Override
+ public String answer(InvocationOnMock invocation)
+ throws Throwable {
+ return fs_addr;
+ }}).when(hfs).getCanonicalServiceName();
+
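+    // TokenCache should query the mocked hftp filesystem for its delegation
+    // tokens and add them to the credentials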
+ Credentials credentials = new Credentials();
+ Path p = new Path(uri.toString());
+    System.out.println("Path for hftp=" + p + "; fs_addr=" + fs_addr + "; rn=" + renewer);
+ TokenCache.obtainTokensForNamenodesInternal(hfs, credentials, jConf);
+
+ Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
+ assertEquals("number of tokens is not 1", 1, tns.size());
+
+    boolean found = false;
+    for(Token<? extends TokenIdentifier> tt: tns) {
+      System.out.println("token=" + tt);
+      if(tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND) &&
+          tt.getService().equals(new Text(fs_addr))) {
+        found = true;
+        assertEquals("different token", t, tt);
+      }
+    }
+    assertTrue("didn't find token for " + p, found);
+ }
+
+  /**
+   * Verify that the _HOST pattern in the JobTracker principal is
+   * substituted with the actual hostname.
+   * @throws IOException
+   */
+ @Test
+ public void testGetJTPrincipal() throws IOException {
+ String serviceName = "jt/";
+ String hostName = "foo";
+ String domainName = "@BAR";
+ Configuration conf = new Configuration();
+ conf.set(JTConfig.JT_IPC_ADDRESS, hostName + ":8888");
+ conf.set(JTConfig.JT_USER_NAME, serviceName + SecurityUtil.HOSTNAME_PATTERN
+ + domainName);
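+    // getMasterPrincipal() should replace _HOST with the host part of JT_IPC_ADDRESS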
+ assertEquals("Failed to substitute HOSTNAME_PATTERN with hostName",
+ serviceName + hostName + domainName, Master.getMasterPrincipal(conf));
+ }
+
+ @Test
+ public void testGetTokensForViewFS() throws IOException, URISyntaxException {
+ Configuration conf = new Configuration(jConf);
+ FileSystem dfs = dfsCluster.getFileSystem();
+ String serviceName = dfs.getCanonicalServiceName();
+
+ Path p1 = new Path("/mount1");
+ Path p2 = new Path("/mount2");
+ p1 = dfs.makeQualified(p1);
+ p2 = dfs.makeQualified(p2);
+
+ conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
+ conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
+ Credentials credentials = new Credentials();
+ Path lp1 = new Path("viewfs:///dir1");
+ Path lp2 = new Path("viewfs:///dir2");
+    Path[] paths = new Path[] { lp1, lp2 };
+ TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
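+    // both viewfs mount points resolve to the same underlying namenode, so
+    // exactly one delegation token is expected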
+
+ Collection<Token<? extends TokenIdentifier>> tns =
+ credentials.getAllTokens();
+ assertEquals("number of tokens is not 1", 1, tns.size());
+
+    boolean found = false;
+    for (Token<? extends TokenIdentifier> tt : tns) {
+      System.out.println("token=" + tt);
+      if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
+          && tt.getService().equals(new Text(serviceName))) {
+        found = true;
+      }
+    }
+    assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
+ }
+}
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/token/TestDelegationTokenRenewal.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/token/TestDelegationTokenRenewal.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/token/TestDelegationTokenRenewal.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/token/TestDelegationTokenRenewal.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/token/delegation/TestDelegationToken.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/token/delegation/TestDelegationToken.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/token/delegation/TestDelegationToken.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/token/delegation/TestDelegationToken.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/record/TestRecordMR.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/record/TestRecordMR.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/record/TestRecordMR.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/record/TestRecordMR.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/record/TestRecordWritable.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/record/TestRecordWritable.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/record/TestRecordWritable.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/record/TestRecordWritable.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/TestCopyFiles.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/TestCopyFiles.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/TestCopyFiles.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/TestCopyFiles.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/TestDistCh.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/TestDistCh.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/TestDistCh.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/TestDistCh.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/TestHadoopArchives.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/TestHadoopArchives.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/TestHadoopArchives.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/TestHadoopArchives.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/TestHarFileSystem.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/TestHarFileSystem.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/TestHarFileSystem.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/TestHarFileSystem.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/ConcatenatedInputFilesDemuxer.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/ConcatenatedInputFilesDemuxer.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/ConcatenatedInputFilesDemuxer.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/ConcatenatedInputFilesDemuxer.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/HistogramRawTestData.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/HistogramRawTestData.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/HistogramRawTestData.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/HistogramRawTestData.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestConcurrentRead.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestConcurrentRead.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestConcurrentRead.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestConcurrentRead.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestHistograms.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestHistograms.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestHistograms.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestHistograms.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestParsedLine.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestParsedLine.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestParsedLine.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestParsedLine.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestPiecewiseLinearInterpolation.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestPiecewiseLinearInterpolation.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestPiecewiseLinearInterpolation.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestPiecewiseLinearInterpolation.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRandomSeedGenerator.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRandomSeedGenerator.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRandomSeedGenerator.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRandomSeedGenerator.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenFolder.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenFolder.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenFolder.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenFolder.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestZombieJob.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestZombieJob.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestZombieJob.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestZombieJob.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/util/TestReflectionUtils.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/util/TestReflectionUtils.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/util/TestReflectionUtils.java
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/util/TestRunJar.java b/hadoop-mapreduce/src/test/mapred/org/apache/hadoop/util/TestRunJar.java
similarity index 100%
rename from mapreduce/src/test/mapred/org/apache/hadoop/util/TestRunJar.java
rename to hadoop-mapreduce/src/test/mapred/org/apache/hadoop/util/TestRunJar.java
diff --git a/mapreduce/src/test/mapred/testjar/ClassWithNoPackage.java b/hadoop-mapreduce/src/test/mapred/testjar/ClassWithNoPackage.java
similarity index 100%
rename from mapreduce/src/test/mapred/testjar/ClassWithNoPackage.java
rename to hadoop-mapreduce/src/test/mapred/testjar/ClassWithNoPackage.java
diff --git a/mapreduce/src/test/mapred/testjar/ClassWordCount.java b/hadoop-mapreduce/src/test/mapred/testjar/ClassWordCount.java
similarity index 100%
rename from mapreduce/src/test/mapred/testjar/ClassWordCount.java
rename to hadoop-mapreduce/src/test/mapred/testjar/ClassWordCount.java
diff --git a/mapreduce/src/test/mapred/testjar/CustomOutputCommitter.java b/hadoop-mapreduce/src/test/mapred/testjar/CustomOutputCommitter.java
similarity index 100%
rename from mapreduce/src/test/mapred/testjar/CustomOutputCommitter.java
rename to hadoop-mapreduce/src/test/mapred/testjar/CustomOutputCommitter.java
diff --git a/mapreduce/src/test/mapred/testjar/ExternalIdentityReducer.java b/hadoop-mapreduce/src/test/mapred/testjar/ExternalIdentityReducer.java
similarity index 100%
rename from mapreduce/src/test/mapred/testjar/ExternalIdentityReducer.java
rename to hadoop-mapreduce/src/test/mapred/testjar/ExternalIdentityReducer.java
diff --git a/mapreduce/src/test/mapred/testjar/ExternalMapperReducer.java b/hadoop-mapreduce/src/test/mapred/testjar/ExternalMapperReducer.java
similarity index 100%
rename from mapreduce/src/test/mapred/testjar/ExternalMapperReducer.java
rename to hadoop-mapreduce/src/test/mapred/testjar/ExternalMapperReducer.java
diff --git a/mapreduce/src/test/mapred/testjar/ExternalWritable.java b/hadoop-mapreduce/src/test/mapred/testjar/ExternalWritable.java
similarity index 100%
rename from mapreduce/src/test/mapred/testjar/ExternalWritable.java
rename to hadoop-mapreduce/src/test/mapred/testjar/ExternalWritable.java
diff --git a/mapreduce/src/test/mapred/testjar/Hello.java b/hadoop-mapreduce/src/test/mapred/testjar/Hello.java
similarity index 100%
rename from mapreduce/src/test/mapred/testjar/Hello.java
rename to hadoop-mapreduce/src/test/mapred/testjar/Hello.java
diff --git a/mapreduce/src/test/mapred/testjar/JobKillCommitter.java b/hadoop-mapreduce/src/test/mapred/testjar/JobKillCommitter.java
similarity index 100%
rename from mapreduce/src/test/mapred/testjar/JobKillCommitter.java
rename to hadoop-mapreduce/src/test/mapred/testjar/JobKillCommitter.java
diff --git a/mapreduce/src/test/mapred/testjar/UserNamePermission.java b/hadoop-mapreduce/src/test/mapred/testjar/UserNamePermission.java
similarity index 100%
rename from mapreduce/src/test/mapred/testjar/UserNamePermission.java
rename to hadoop-mapreduce/src/test/mapred/testjar/UserNamePermission.java
diff --git a/mapreduce/src/test/mapred/testshell/ExternalMapReduce.java b/hadoop-mapreduce/src/test/mapred/testshell/ExternalMapReduce.java
similarity index 100%
rename from mapreduce/src/test/mapred/testshell/ExternalMapReduce.java
rename to hadoop-mapreduce/src/test/mapred/testshell/ExternalMapReduce.java
diff --git a/mapreduce/src/test/smoke-tests b/hadoop-mapreduce/src/test/smoke-tests
similarity index 100%
rename from mapreduce/src/test/smoke-tests
rename to hadoop-mapreduce/src/test/smoke-tests
diff --git a/mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj b/hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj
similarity index 100%
rename from mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj
rename to hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj
diff --git a/mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj b/hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj
similarity index 100%
rename from mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj
rename to hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj
diff --git a/mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj b/hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj
similarity index 100%
rename from mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj
rename to hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj
diff --git a/mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj b/hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj
similarity index 100%
rename from mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj
rename to hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj
diff --git a/mapreduce/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj b/hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj
similarity index 100%
rename from mapreduce/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj
rename to hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj
diff --git a/mapreduce/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj b/hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj
similarity index 100%
rename from mapreduce/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj
rename to hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj
diff --git a/mapreduce/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj b/hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj
similarity index 100%
rename from mapreduce/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj
rename to hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj
diff --git a/mapreduce/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj b/hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj
similarity index 100%
rename from mapreduce/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj
rename to hadoop-mapreduce/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj
diff --git a/mapreduce/src/test/system/conf/system-test-mapred.xml b/hadoop-mapreduce/src/test/system/conf/system-test-mapred.xml
similarity index 100%
rename from mapreduce/src/test/system/conf/system-test-mapred.xml
rename to hadoop-mapreduce/src/test/system/conf/system-test-mapred.xml
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java
diff --git a/mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java b/hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java
similarity index 100%
rename from mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java
rename to hadoop-mapreduce/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java
diff --git a/mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java b/hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java
similarity index 100%
rename from mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java
rename to hadoop-mapreduce/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java
diff --git a/mapreduce/src/test/test-patch.properties b/hadoop-mapreduce/src/test/test-patch.properties
similarity index 100%
rename from mapreduce/src/test/test-patch.properties
rename to hadoop-mapreduce/src/test/test-patch.properties
diff --git a/mapreduce/src/test/tools/data/rumen/histogram-tests/gold-minimal.json b/hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/gold-minimal.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/histogram-tests/gold-minimal.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/gold-minimal.json
diff --git a/mapreduce/src/test/tools/data/rumen/histogram-tests/gold-one-value-many-repeats.json b/hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/gold-one-value-many-repeats.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/histogram-tests/gold-one-value-many-repeats.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/gold-one-value-many-repeats.json
diff --git a/mapreduce/src/test/tools/data/rumen/histogram-tests/gold-only-one-value.json b/hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/gold-only-one-value.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/histogram-tests/gold-only-one-value.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/gold-only-one-value.json
diff --git a/mapreduce/src/test/tools/data/rumen/histogram-tests/gold-three-values.json b/hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/gold-three-values.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/histogram-tests/gold-three-values.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/gold-three-values.json
diff --git a/mapreduce/src/test/tools/data/rumen/histogram-tests/input-minimal.json b/hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/input-minimal.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/histogram-tests/input-minimal.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/input-minimal.json
diff --git a/mapreduce/src/test/tools/data/rumen/histogram-tests/input-one-value-many-repeats.json b/hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/input-one-value-many-repeats.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/histogram-tests/input-one-value-many-repeats.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/input-one-value-many-repeats.json
diff --git a/mapreduce/src/test/tools/data/rumen/histogram-tests/input-only-one-value.json b/hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/input-only-one-value.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/histogram-tests/input-only-one-value.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/input-only-one-value.json
diff --git a/mapreduce/src/test/tools/data/rumen/histogram-tests/input-three-values.json b/hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/input-three-values.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/histogram-tests/input-three-values.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/histogram-tests/input-three-values.json
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_conf.xml b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_conf.xml
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_conf.xml
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_conf.xml
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_job_name-DAILY%2F20100210%5D.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_job_name-DAILY%2F20100210%5D.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_job_name-DAILY%2F20100210%5D.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_job_name-DAILY%2F20100210%5D.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_conf.xml b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_conf.xml
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_conf.xml
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_conf.xml
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_job_name-DAILY%2F20100208%5D.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_job_name-DAILY%2F20100208%5D.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_job_name-DAILY%2F20100208%5D.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_job_name-DAILY%2F20100208%5D.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/counters-test-trace.json.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-test-trace.json.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/counters-test-trace.json.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/counters-test-trace.json.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-sample-v20-jt-log.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-sample-v20-jt-log.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-sample-v20-jt-log.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-sample-v20-jt-log.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-topology-output.json.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-topology-output.json.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-topology-output.json.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-topology-output.json.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-trace-output.json.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-trace-output.json.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-trace-output.json.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/dispatch-trace-output.json.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/folder-input-trace.json.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/folder-input-trace.json.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/folder-input-trace.json.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/folder-input-trace.json.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/goldFoldedTrace.json.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/goldFoldedTrace.json.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/goldFoldedTrace.json.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/goldFoldedTrace.json.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-trace-output.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-trace-output.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-trace-output.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-trace-output.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/sample-conf.file.new.xml b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/sample-conf.file.new.xml
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/sample-conf.file.new.xml
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/sample-conf.file.new.xml
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/sample-conf.file.xml b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/sample-conf.file.xml
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/sample-conf.file.xml
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/sample-conf.file.xml
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/sample-job-tracker-logs.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/sample-job-tracker-logs.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/sample-job-tracker-logs.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/sample-job-tracker-logs.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-job-tracker-log b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-job-tracker-log
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-job-tracker-log
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-job-tracker-log
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-topology-output b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-topology-output
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-topology-output
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-topology-output
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-trace-output b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-trace-output
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-trace-output
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/truncated-trace-output
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/v20-resource-usage-log.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/v20-resource-usage-log.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/v20-resource-usage-log.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/v20-resource-usage-log.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/small-trace-test/v20-single-input-log.gz b/hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/v20-single-input-log.gz
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/small-trace-test/v20-single-input-log.gz
rename to hadoop-mapreduce/src/test/tools/data/rumen/small-trace-test/v20-single-input-log.gz
Binary files differ
diff --git a/mapreduce/src/test/tools/data/rumen/zombie/input-topology.json b/hadoop-mapreduce/src/test/tools/data/rumen/zombie/input-topology.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/zombie/input-topology.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/zombie/input-topology.json
diff --git a/mapreduce/src/test/tools/data/rumen/zombie/input-trace.json b/hadoop-mapreduce/src/test/tools/data/rumen/zombie/input-trace.json
similarity index 100%
rename from mapreduce/src/test/tools/data/rumen/zombie/input-trace.json
rename to hadoop-mapreduce/src/test/tools/data/rumen/zombie/input-trace.json
diff --git a/mapreduce/src/test/unit/org/apache/hadoop/mapred/TestJobTrackerPlugins.java b/hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapred/TestJobTrackerPlugins.java
similarity index 100%
rename from mapreduce/src/test/unit/org/apache/hadoop/mapred/TestJobTrackerPlugins.java
rename to hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapred/TestJobTrackerPlugins.java
diff --git a/mapreduce/src/test/unit/org/apache/hadoop/mapred/TestLostTaskTracker.java b/hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapred/TestLostTaskTracker.java
similarity index 100%
rename from mapreduce/src/test/unit/org/apache/hadoop/mapred/TestLostTaskTracker.java
rename to hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapred/TestLostTaskTracker.java
diff --git a/mapreduce/src/test/unit/org/apache/hadoop/mapred/TestTaskTrackerDirectories.java b/hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapred/TestTaskTrackerDirectories.java
similarity index 100%
rename from mapreduce/src/test/unit/org/apache/hadoop/mapred/TestTaskTrackerDirectories.java
rename to hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapred/TestTaskTrackerDirectories.java
diff --git a/mapreduce/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestControlledJob.java b/hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestControlledJob.java
similarity index 100%
rename from mapreduce/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestControlledJob.java
rename to hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestControlledJob.java
diff --git a/mapreduce/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java b/hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java
similarity index 100%
rename from mapreduce/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java
rename to hadoop-mapreduce/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/fs/HarFileSystem.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/fs/HarFileSystem.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/fs/HarFileSystem.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/fs/package-info.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/fs/package-info.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/fs/package-info.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/fs/package-info.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/DistCh.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/DistCh.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/DistCh.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/DistCh.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/DistCp.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/DistCp.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/DistCp.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/DistCp.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/DistCp_Counter.properties b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/DistCp_Counter.properties
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/DistCp_Counter.properties
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/DistCp_Counter.properties
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/DistTool.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/DistTool.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/DistTool.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/DistTool.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/HadoopArchives.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/HadoopArchives.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/HadoopArchives.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/Logalyzer.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/Logalyzer.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/Logalyzer.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/Logalyzer.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/package-info.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/package-info.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/package-info.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/package-info.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/AbstractClusterStory.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/AbstractClusterStory.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/AbstractClusterStory.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/AbstractClusterStory.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/CDFPiecewiseLinearRandomGenerator.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/CDFPiecewiseLinearRandomGenerator.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/CDFPiecewiseLinearRandomGenerator.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/CDFPiecewiseLinearRandomGenerator.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/CDFRandomGenerator.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/CDFRandomGenerator.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/CDFRandomGenerator.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/CDFRandomGenerator.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ClusterStory.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ClusterStory.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ClusterStory.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ClusterStory.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ClusterTopologyReader.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ClusterTopologyReader.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ClusterTopologyReader.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ClusterTopologyReader.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/CurrentJHParser.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/CurrentJHParser.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/CurrentJHParser.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/CurrentJHParser.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeepCompare.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeepCompare.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeepCompare.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeepCompare.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeepInequalityException.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeepInequalityException.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeepInequalityException.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeepInequalityException.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/DefaultInputDemuxer.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DefaultInputDemuxer.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/DefaultInputDemuxer.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DefaultInputDemuxer.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/DefaultOutputter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DefaultOutputter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/DefaultOutputter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DefaultOutputter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeskewedJobTraceReader.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeskewedJobTraceReader.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeskewedJobTraceReader.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/DeskewedJobTraceReader.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Folder.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Folder.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Folder.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Folder.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Hadoop20JHParser.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Hadoop20JHParser.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Hadoop20JHParser.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Hadoop20JHParser.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Histogram.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Histogram.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Histogram.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Histogram.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/HistoryEventEmitter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/HistoryEventEmitter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/HistoryEventEmitter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/HistoryEventEmitter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/InputDemuxer.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/InputDemuxer.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/InputDemuxer.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/InputDemuxer.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobBuilder.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobBuilder.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobBuilder.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobBuilder.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobConfPropertyNames.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobConfPropertyNames.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobConfPropertyNames.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobConfPropertyNames.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobConfigurationParser.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobConfigurationParser.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobConfigurationParser.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobConfigurationParser.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobHistoryParser.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobHistoryParser.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobHistoryParser.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobHistoryParser.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobHistoryParserFactory.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobHistoryParserFactory.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobHistoryParserFactory.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobHistoryParserFactory.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobStory.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobStory.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobStory.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobStory.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobStoryProducer.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobStoryProducer.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobStoryProducer.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobStoryProducer.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobTraceReader.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobTraceReader.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobTraceReader.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobTraceReader.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LogRecordType.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LogRecordType.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/LogRecordType.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LogRecordType.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedDiscreteCDF.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedDiscreteCDF.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedDiscreteCDF.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedDiscreteCDF.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedJob.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedJob.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedJob.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedJob.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedLocation.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedLocation.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedLocation.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedLocation.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTask.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTask.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTask.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTask.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MachineNode.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/MachineNode.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/MachineNode.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/MachineNode.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Node.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Node.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Node.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Node.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Outputter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Outputter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Outputter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Outputter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Pair.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Pair.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Pair.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Pair.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedConfigFile.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedConfigFile.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedConfigFile.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedConfigFile.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedHost.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedHost.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedHost.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedHost.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedLine.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedLine.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedLine.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ParsedLine.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/PossiblyDecompressedInputStream.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/PossiblyDecompressedInputStream.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/PossiblyDecompressedInputStream.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/PossiblyDecompressedInputStream.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/RackNode.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/RackNode.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/RackNode.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/RackNode.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/RandomSeedGenerator.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/RandomSeedGenerator.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/RandomSeedGenerator.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/RandomSeedGenerator.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ResourceUsageMetrics.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ResourceUsageMetrics.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ResourceUsageMetrics.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ResourceUsageMetrics.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/RewindableInputStream.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/RewindableInputStream.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/RewindableInputStream.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/RewindableInputStream.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/SingleEventEmitter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/SingleEventEmitter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/SingleEventEmitter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/SingleEventEmitter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Task20LineHistoryEventEmitter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Task20LineHistoryEventEmitter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Task20LineHistoryEventEmitter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Task20LineHistoryEventEmitter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttempt20LineEventEmitter.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttempt20LineEventEmitter.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttempt20LineEventEmitter.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttempt20LineEventEmitter.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskInfo.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskInfo.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskInfo.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskInfo.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TopologyBuilder.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TopologyBuilder.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/TopologyBuilder.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TopologyBuilder.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TraceBuilder.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TraceBuilder.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/TraceBuilder.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TraceBuilder.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TreePath.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TreePath.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/TreePath.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/TreePath.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/Version20LogInterfaceUtils.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Version20LogInterfaceUtils.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/Version20LogInterfaceUtils.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/Version20LogInterfaceUtils.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieCluster.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieCluster.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieCluster.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieCluster.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJob.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJob.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJob.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJob.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJobProducer.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJobProducer.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJobProducer.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJobProducer.java
diff --git a/mapreduce/src/tools/org/apache/hadoop/tools/rumen/package-info.java b/hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/package-info.java
similarity index 100%
rename from mapreduce/src/tools/org/apache/hadoop/tools/rumen/package-info.java
rename to hadoop-mapreduce/src/tools/org/apache/hadoop/tools/rumen/package-info.java
diff --git a/mapreduce/src/webapps/job/analysejobhistory.jsp b/hadoop-mapreduce/src/webapps/job/analysejobhistory.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/analysejobhistory.jsp
rename to hadoop-mapreduce/src/webapps/job/analysejobhistory.jsp
diff --git a/mapreduce/src/webapps/job/index.html b/hadoop-mapreduce/src/webapps/job/index.html
similarity index 100%
rename from mapreduce/src/webapps/job/index.html
rename to hadoop-mapreduce/src/webapps/job/index.html
diff --git a/mapreduce/src/webapps/job/job_authorization_error.jsp b/hadoop-mapreduce/src/webapps/job/job_authorization_error.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/job_authorization_error.jsp
rename to hadoop-mapreduce/src/webapps/job/job_authorization_error.jsp
diff --git a/mapreduce/src/webapps/job/jobblacklistedtrackers.jsp b/hadoop-mapreduce/src/webapps/job/jobblacklistedtrackers.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobblacklistedtrackers.jsp
rename to hadoop-mapreduce/src/webapps/job/jobblacklistedtrackers.jsp
diff --git a/mapreduce/src/webapps/job/jobconf.jsp b/hadoop-mapreduce/src/webapps/job/jobconf.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobconf.jsp
rename to hadoop-mapreduce/src/webapps/job/jobconf.jsp
diff --git a/mapreduce/src/webapps/job/jobconf_history.jsp b/hadoop-mapreduce/src/webapps/job/jobconf_history.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobconf_history.jsp
rename to hadoop-mapreduce/src/webapps/job/jobconf_history.jsp
diff --git a/mapreduce/src/webapps/job/jobdetails.jsp b/hadoop-mapreduce/src/webapps/job/jobdetails.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobdetails.jsp
rename to hadoop-mapreduce/src/webapps/job/jobdetails.jsp
diff --git a/mapreduce/src/webapps/job/jobdetailshistory.jsp b/hadoop-mapreduce/src/webapps/job/jobdetailshistory.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobdetailshistory.jsp
rename to hadoop-mapreduce/src/webapps/job/jobdetailshistory.jsp
diff --git a/mapreduce/src/webapps/job/jobfailures.jsp b/hadoop-mapreduce/src/webapps/job/jobfailures.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobfailures.jsp
rename to hadoop-mapreduce/src/webapps/job/jobfailures.jsp
diff --git a/mapreduce/src/webapps/job/jobhistory.jsp b/hadoop-mapreduce/src/webapps/job/jobhistory.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobhistory.jsp
rename to hadoop-mapreduce/src/webapps/job/jobhistory.jsp
diff --git a/mapreduce/src/webapps/job/jobqueue_details.jsp b/hadoop-mapreduce/src/webapps/job/jobqueue_details.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobqueue_details.jsp
rename to hadoop-mapreduce/src/webapps/job/jobqueue_details.jsp
diff --git a/mapreduce/src/webapps/job/jobtable.jsp b/hadoop-mapreduce/src/webapps/job/jobtable.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobtable.jsp
rename to hadoop-mapreduce/src/webapps/job/jobtable.jsp
diff --git a/mapreduce/src/webapps/job/jobtasks.jsp b/hadoop-mapreduce/src/webapps/job/jobtasks.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobtasks.jsp
rename to hadoop-mapreduce/src/webapps/job/jobtasks.jsp
diff --git a/mapreduce/src/webapps/job/jobtaskshistory.jsp b/hadoop-mapreduce/src/webapps/job/jobtaskshistory.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobtaskshistory.jsp
rename to hadoop-mapreduce/src/webapps/job/jobtaskshistory.jsp
diff --git a/mapreduce/src/webapps/job/jobtracker.jsp b/hadoop-mapreduce/src/webapps/job/jobtracker.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/jobtracker.jsp
rename to hadoop-mapreduce/src/webapps/job/jobtracker.jsp
diff --git a/mapreduce/src/webapps/job/jobtracker.jspx b/hadoop-mapreduce/src/webapps/job/jobtracker.jspx
similarity index 100%
rename from mapreduce/src/webapps/job/jobtracker.jspx
rename to hadoop-mapreduce/src/webapps/job/jobtracker.jspx
diff --git a/mapreduce/src/webapps/job/machines.jsp b/hadoop-mapreduce/src/webapps/job/machines.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/machines.jsp
rename to hadoop-mapreduce/src/webapps/job/machines.jsp
diff --git a/mapreduce/src/webapps/job/queueinfo.jsp b/hadoop-mapreduce/src/webapps/job/queueinfo.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/queueinfo.jsp
rename to hadoop-mapreduce/src/webapps/job/queueinfo.jsp
diff --git a/mapreduce/src/webapps/job/queuetable.jsp b/hadoop-mapreduce/src/webapps/job/queuetable.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/queuetable.jsp
rename to hadoop-mapreduce/src/webapps/job/queuetable.jsp
diff --git a/mapreduce/src/webapps/job/taskdetails.jsp b/hadoop-mapreduce/src/webapps/job/taskdetails.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/taskdetails.jsp
rename to hadoop-mapreduce/src/webapps/job/taskdetails.jsp
diff --git a/mapreduce/src/webapps/job/taskdetailshistory.jsp b/hadoop-mapreduce/src/webapps/job/taskdetailshistory.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/taskdetailshistory.jsp
rename to hadoop-mapreduce/src/webapps/job/taskdetailshistory.jsp
diff --git a/mapreduce/src/webapps/job/taskstats.jsp b/hadoop-mapreduce/src/webapps/job/taskstats.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/taskstats.jsp
rename to hadoop-mapreduce/src/webapps/job/taskstats.jsp
diff --git a/mapreduce/src/webapps/job/taskstatshistory.jsp b/hadoop-mapreduce/src/webapps/job/taskstatshistory.jsp
similarity index 100%
rename from mapreduce/src/webapps/job/taskstatshistory.jsp
rename to hadoop-mapreduce/src/webapps/job/taskstatshistory.jsp
diff --git a/mapreduce/src/webapps/static/hadoop-logo.jpg b/hadoop-mapreduce/src/webapps/static/hadoop-logo.jpg
similarity index 100%
rename from mapreduce/src/webapps/static/hadoop-logo.jpg
rename to hadoop-mapreduce/src/webapps/static/hadoop-logo.jpg
diff --git a/mapreduce/src/webapps/static/hadoop.css b/hadoop-mapreduce/src/webapps/static/hadoop.css
similarity index 100%
rename from mapreduce/src/webapps/static/hadoop.css
rename to hadoop-mapreduce/src/webapps/static/hadoop.css
diff --git a/mapreduce/src/webapps/static/jobconf.xsl b/hadoop-mapreduce/src/webapps/static/jobconf.xsl
similarity index 100%
rename from mapreduce/src/webapps/static/jobconf.xsl
rename to hadoop-mapreduce/src/webapps/static/jobconf.xsl
diff --git a/mapreduce/src/webapps/static/jobtracker.js b/hadoop-mapreduce/src/webapps/static/jobtracker.js
similarity index 100%
rename from mapreduce/src/webapps/static/jobtracker.js
rename to hadoop-mapreduce/src/webapps/static/jobtracker.js
diff --git a/mapreduce/src/webapps/task/index.html b/hadoop-mapreduce/src/webapps/task/index.html
similarity index 100%
rename from mapreduce/src/webapps/task/index.html
rename to hadoop-mapreduce/src/webapps/task/index.html
diff --git a/mapreduce/src/webapps/task/tasktracker.jsp b/hadoop-mapreduce/src/webapps/task/tasktracker.jsp
similarity index 100%
rename from mapreduce/src/webapps/task/tasktracker.jsp
rename to hadoop-mapreduce/src/webapps/task/tasktracker.jsp
diff --git a/mapreduce/ivy.xml b/mapreduce/ivy.xml
deleted file mode 100644
index ad9e735..0000000
--- a/mapreduce/ivy.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}" revision="${version}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description>
- Hadoop Core
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="compile" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact"/>
-
- <!--
- These public configurations contain the core dependencies for running hadoop client or server.
- The server is effectively a superset of the client.
- -->
- <!--Private configurations. -->
-
- <conf name="common" visibility="private" extends="compile" description="common artifacts"/>
- <conf name="mapred" visibility="private" extends="compile,runtime" description="Mapred dependent artifacts"/>
-    <conf name="javadoc" visibility="private" description="artifacts required while performing doc generation" extends="common"/>
- <conf name="test" extends="master" visibility="private" description="the classpath needed to run tests"/>
- <conf name="package" extends="master" description="the classpath needed for packaging"/>
- <conf name="system" extends="test" visibility="private" description="the classpath needed to run system tests"/>
-
- <conf name="test-hdfswithmr" extends="test" visibility="private" description="the classpath needed to run tests"/>
-
- <conf name="releaseaudit" visibility="private" description="Artifacts required for releaseaudit target"/>
-
- <conf name="jdiff" visibility="private" extends="common"/>
- <conf name="checkstyle" visibility="private"/>
-
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="compile->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common"
- rev="${hadoop-common.version}" conf="compile->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common-test"
- rev="${hadoop-common.version}" conf="compile->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs"
- rev="${hadoop-hdfs.version}" conf="compile->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common-instrumented"
- rev="${hadoop-common.version}" conf="system->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs-instrumented"
- rev="${hadoop-common.version}" conf="system->default"/>
- <dependency org="commons-logging" name="commons-logging"
- rev="${commons-logging.version}" conf="compile->master"/>
- <dependency org="log4j" name="log4j" rev="${log4j.version}"
- conf="compile->master"/>
-
- <dependency org="org.slf4j" name="slf4j-api" rev="${slf4j-api.version}"
- conf="compile->master"/>
- <dependency org="org.slf4j" name="slf4j-log4j12"
- rev="${slf4j-log4j12.version}" conf="mapred->master"/>
- <dependency org="org.apache.hadoop" name="hadoop-common-test"
- rev="${hadoop-common.version}" conf="test->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
- rev="${hadoop-hdfs.version}" conf="test->default"/>
-
- <dependency org="checkstyle" name="checkstyle" rev="${checkstyle.version}"
- conf="checkstyle->default"/>
-
- <dependency org="jdiff" name="jdiff" rev="${jdiff.version}"
- conf="jdiff->default"/>
- <dependency org="xerces" name="xerces" rev="${xerces.version}"
- conf="jdiff->default"/>
-
- <dependency org="org.apache.rat" name="apache-rat-tasks"
- rev="${rats-lib.version}" conf="releaseaudit->default"/>
- <dependency org="commons-lang" name="commons-lang"
- rev="${commons-lang.version}" conf="releaseaudit->default"/>
- <dependency org="commons-collections" name="commons-collections"
- rev="${commons-collections.version}"
- conf="releaseaudit->default"/>
-
- <dependency org="org.apache.lucene" name="lucene-core"
- rev="${lucene-core.version}" conf="javadoc->default"/>
- <dependency org="org.apache.hadoop" name="avro" rev="${avro.version}"
- conf="compile->default">
- <exclude module="ant"/>
- <exclude module="jetty"/>
- <exclude module="slf4j-simple"/>
- </dependency>
- <dependency org="junit" name="junit" rev="${junit.version}"
- conf="test->default"/>
- <dependency org="org.mockito" name="mockito-all" rev="${mockito-all.version}"
- conf="test->default"/>
- <dependency org="org.vafer" name="jdeb" rev="${jdeb.version}" conf="package->master"/>
- <dependency org="org.mortbay.jetty" name="jetty-servlet-tester" rev="${jetty.version}"
- conf="test->default"/>
- <!-- dependency addition for the fault injection -->
- <dependency org="org.aspectj" name="aspectjrt" rev="${aspectj.version}"
- conf="compile->default"/>
- <dependency org="org.aspectj" name="aspectjtools" rev="${aspectj.version}"
- conf="compile->default"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-
-</ivy-module>
diff --git a/mapreduce/ivy/libraries.properties b/mapreduce/ivy/libraries.properties
deleted file mode 100644
index 9d40aaa..0000000
--- a/mapreduce/ivy/libraries.properties
+++ /dev/null
@@ -1,83 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#This properties file lists the versions of the various artifacts used by hadoop and components.
-#It drives ivy and the generation of a maven POM
-
-#These are the versions of our dependencies (in alphabetical order)
-ant-task.version=2.0.10
-
-#AspectJ dependency for Fault injection
-#This property has to be updated synchronously with aop.xml
-aspectj.version=1.6.5
-
-avro.version=1.3.2
-
-checkstyle.version=4.2
-
-commons-cli.version=1.2
-commons-collections.version=3.1
-commons-httpclient.version=3.1
-commons-lang.version=2.5
-commons-logging.version=1.1.1
-commons-logging-api.version=1.1
-commons-el.version=1.0
-commons-fileupload.version=1.2
-commons-io.version=1.4
-commons-net.version=1.4.1
-core.version=3.1.1
-coreplugin.version=1.3.2
-
-ftplet-api.version=1.0.0
-ftpserver-core.version=1.0.0
-ftpserver-deprecated.version=1.0.0-M2
-
-hadoop-common.version=0.23.0-SNAPSHOT
-hadoop-hdfs.version=0.23.0-SNAPSHOT
-
-hsqldb.version=1.8.0.10
-
-ivy.version=2.1.0
-
-jasper.version=5.5.12
-jdeb.version=0.8
-jsp.version=2.1
-jsp-api.version=5.5.12
-jets3t.version=0.7.1
-jetty.version=6.1.14
-jetty-util.version=6.1.14
-junit.version=4.8.1
-jdiff.version=1.0.9
-
-kfs.version=0.3
-
-log4j.version=1.2.15
-lucene-core.version=2.3.1
-
-mina-core.version=2.0.0-M5
-
-mockito-all.version=1.8.2
-
-oro.version=2.0.8
-
-rats-lib.version=0.6
-
-servlet.version=4.0.6
-servlet-api-2.5.version=6.1.14
-servlet-api.version=2.5
-slf4j-api.version=1.5.11
-slf4j-log4j12.version=1.5.11
-
-wagon-http.version=1.0-beta-2
-xmlenc.version=0.52
-xerces.version=1.4.4
-
diff --git a/mapreduce/src/contrib/block_forensics/ivy.xml b/mapreduce/src/contrib/block_forensics/ivy.xml
deleted file mode 100644
index 6165cd0..0000000
--- a/mapreduce/src/contrib/block_forensics/ivy.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0" ?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description>
- Apache Hadoop
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private"
- extends="runtime"
- description="artifacts needed to compile/test the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-common"
- rev="${hadoop-common.version}" conf="common->default"/>
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/capacity-scheduler/ivy.xml b/mapreduce/src/contrib/capacity-scheduler/ivy.xml
deleted file mode 100644
index ccb8644..0000000
--- a/mapreduce/src/contrib/capacity-scheduler/ivy.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description>
- Apache Hadoop
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private"
- extends="runtime"
- description="artifacts needed to compile/test the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common"
- rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common-test"
- rev="${hadoop-common.version}" conf="test->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs"
- rev="${hadoop-hdfs.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
- rev="${hadoop-hdfs.version}" conf="test->default"/>
- <dependency org="commons-cli" name="commons-cli"
- rev="${commons-cli.version}" conf="common->default"/>
- <dependency org="commons-logging" name="commons-logging"
- rev="${commons-logging.version}" conf="common->default"/>
- <dependency org="junit" name="junit"
- rev="${junit.version}" conf="common->default"/>
- <dependency org="log4j" name="log4j"
- rev="${log4j.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="jetty-util"
- rev="${jetty-util.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="jetty"
- rev="${jetty.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="jsp-api-2.1"
- rev="${jetty.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="jsp-2.1"
- rev="${jetty.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="servlet-api-2.5"
- rev="${servlet-api-2.5.version}" conf="common->master"/>
- <dependency org="commons-httpclient" name="commons-httpclient"
- rev="${commons-httpclient.version}" conf="common->master"/>
- <dependency org="org.apache.hadoop" name="avro"
- rev="${avro.version}" conf="common->default">
- <exclude module="ant"/>
- <exclude module="jetty"/>
- <exclude module="slf4j-simple"/>
- </dependency>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/data_join/ivy.xml b/mapreduce/src/contrib/data_join/ivy.xml
deleted file mode 100644
index c55443e..0000000
--- a/mapreduce/src/contrib/data_join/ivy.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description>
- Apache Hadoop
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private"
- extends="runtime"
- description="artifacts needed to compile the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common-test" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs" rev="${hadoop-hdfs.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs-test" rev="${hadoop-hdfs.version}" conf="common->default"/>
- <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->default"/>
- <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
- <dependency org="junit" name="junit" rev="${junit.version}" conf="common->default"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/dynamic-scheduler/ivy.xml b/mapreduce/src/contrib/dynamic-scheduler/ivy.xml
deleted file mode 100644
index 91b3b0e..0000000
--- a/mapreduce/src/contrib/dynamic-scheduler/ivy.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description> Apache Hadoop contrib </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private" extends="runtime" description="common artifacts"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <artifact conf="master"/>
- </publications>
-
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.mortbay.jetty" name="jetty" rev="${jetty.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="jetty-util" rev="${jetty-util.version}" conf="common->master"/>
- <dependency org="tomcat" name="jasper-runtime" rev="${jasper.version}" conf="common->master"/>
- <dependency org="tomcat" name="jasper-compiler" rev="${jasper.version}" conf="common->master"/>
- <dependency org="commons-el" name="commons-el" rev="${commons-el.version}" conf="common->master"/>
- <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->master"/>
- <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
- <dependency org="net.java.dev.jets3t" name="jets3t" rev="${jets3t.version}" conf="common->master"/>
- <dependency org="commons-net" name="commons-net" rev="${commons-net.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="servlet-api-2.5" rev="${servlet-api-2.5.version}" conf="common->master"/>
- <dependency org="junit" name="junit" rev="${junit.version}" conf="common->default"/>
- <dependency org="org.slf4j" name="slf4j-api" rev="${slf4j-api.version}" conf="common->master"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/fairscheduler/ivy.xml b/mapreduce/src/contrib/fairscheduler/ivy.xml
deleted file mode 100644
index 57fd94d0..0000000
--- a/mapreduce/src/contrib/fairscheduler/ivy.xml
+++ /dev/null
@@ -1,112 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description>
- Apache Hadoop contrib
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private"
- description="artifacts needed to compile/test the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common"
- rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common-test"
- rev="${hadoop-common.version}" conf="test->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs"
- rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
- rev="${hadoop-common.version}" conf="test->default"/>
- <dependency org="commons-logging"
- name="commons-logging"
- rev="${commons-logging.version}"
- conf="common->default"/>
- <dependency org="log4j"
- name="log4j"
- rev="${log4j.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="servlet-api-2.5"
- rev="${servlet-api-2.5.version}"
- conf="common->default"/>
- <dependency org="junit"
- name="junit"
- rev="${junit.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="avro"
- rev="${avro.version}"
- conf="common->default">
- <exclude module="ant"/>
- <exclude module="jetty"/>
- <exclude module="slf4j-simple"/>
- </dependency>
- <dependency org="org.codehaus.jackson"
- name="jackson-mapper-asl"
- rev="${jackson.version}"
- conf="common->default"/>
- <dependency org="com.thoughtworks.paranamer"
- name="paranamer"
- rev="${paranamer.version}"
- conf="common->default"/>
- <dependency org="com.thoughtworks.paranamer"
- name="paranamer-ant"
- rev="${paranamer.version}"
- conf="common->default"/>
- <dependency org="org.mortbay.jetty"
- name="jetty-util"
- rev="${jetty-util.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jetty"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jsp-api-2.1"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jsp-2.1"
- rev="${jetty.version}"
- conf="common->master"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/gridmix/ivy.xml b/mapreduce/src/contrib/gridmix/ivy.xml
deleted file mode 100644
index 488e75c..0000000
--- a/mapreduce/src/contrib/gridmix/ivy.xml
+++ /dev/null
@@ -1,137 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
-    <description>Gridmix</description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private" extends="runtime"
- description="artifacts needed to compile/test the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop"
- name="hadoop-annotations"
- rev="${hadoop-common.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="hadoop-common"
- rev="${hadoop-common.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="hadoop-common-test"
- rev="${hadoop-common.version}"
- conf="test->default"/>
- <dependency org="org.apache.hadoop"
- name="hadoop-hdfs"
- rev="${hadoop-hdfs.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="hadoop-hdfs-test"
- rev="${hadoop-hdfs.version}"
- conf="test->default"/>
- <dependency org="commons-logging"
- name="commons-logging"
- rev="${commons-logging.version}"
- conf="common->default"/>
- <dependency org="log4j"
- name="log4j"
- rev="${log4j.version}"
- conf="common->master"/>
- <dependency org="junit"
- name="junit"
- rev="${junit.version}"
- conf="common->default"/>
-
- <!-- necessary for Mini*Clusters -->
- <dependency org="commons-httpclient"
- name="commons-httpclient"
- rev="${commons-httpclient.version}"
- conf="common->master"/>
- <dependency org="commons-codec"
- name="commons-codec"
- rev="${commons-codec.version}"
- conf="common->default"/>
- <dependency org="commons-net"
- name="commons-net"
- rev="${commons-net.version}"
- conf="common->default"/>
- <dependency org="org.mortbay.jetty"
- name="jetty"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jetty-util"
- rev="${jetty-util.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jsp-api-2.1"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jsp-2.1"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="servlet-api-2.5"
- rev="${servlet-api-2.5.version}"
- conf="common->master"/>
- <dependency org="commons-cli"
- name="commons-cli"
- rev="${commons-cli.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="avro"
- rev="${avro.version}"
- conf="common->default">
- <exclude module="ant"/>
- <exclude module="jetty"/>
- <exclude module="slf4j-simple"/>
- </dependency>
- <dependency org="org.codehaus.jackson"
- name="jackson-mapper-asl"
- rev="${jackson.version}"
- conf="common->default"/>
- <dependency org="org.codehaus.jackson"
- name="jackson-core-asl"
- rev="${jackson.version}"
- conf="common->default"/>
- <dependency org="com.thoughtworks.paranamer"
- name="paranamer"
- rev="${paranamer.version}"
- conf="common->default"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java b/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java
deleted file mode 100644
index d7c653a..0000000
--- a/mapreduce/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java
+++ /dev/null
@@ -1,613 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import java.io.IOException;
-
-import org.junit.Test;
-import static org.junit.Assert.*;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.StatusReporter;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.mapreduce.TaskInputOutputContext;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.apache.hadoop.mapreduce.task.MapContextImpl;
-import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
-import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin.ProcResourceValues;
-import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
-import org.apache.hadoop.mapred.DummyResourceCalculatorPlugin;
-import org.apache.hadoop.mapred.gridmix.LoadJob.ResourceUsageMatcherRunner;
-import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin;
-import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageEmulatorPlugin;
-import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageMatcher;
-import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin.DefaultCpuUsageEmulator;
-
-/**
- * Test Gridmix's resource emulator framework and supported plugins.
- */
-public class TestResourceUsageEmulators {
- /**
- * A {@link ResourceUsageEmulatorPlugin} implementation for testing purpose.
- * It essentially creates a file named 'test' in the test directory.
- */
- static class TestResourceUsageEmulatorPlugin
- implements ResourceUsageEmulatorPlugin {
- static final Path rootTempDir =
- new Path(System.getProperty("test.build.data", "/tmp"));
- static final Path tempDir =
- new Path(rootTempDir, "TestResourceUsageEmulatorPlugin");
- static final String DEFAULT_IDENTIFIER = "test";
-
- private Path touchPath = null;
- private FileSystem fs = null;
-
- @Override
- public void emulate() throws IOException, InterruptedException {
- // add some time between 2 calls to emulate()
- try {
- Thread.sleep(1000); // sleep for 1s
- } catch (Exception e){}
-
- try {
- fs.delete(touchPath, false); // delete the touch file
- //TODO Search for a better touch utility
- fs.create(touchPath).close(); // recreate it
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- protected String getIdentifier() {
- return DEFAULT_IDENTIFIER;
- }
-
- private static Path getFilePath(String id) {
- return new Path(tempDir, id);
- }
-
- private static Path getInitFilePath(String id) {
- return new Path(tempDir, id + ".init");
- }
-
- @Override
- public void initialize(Configuration conf, ResourceUsageMetrics metrics,
- ResourceCalculatorPlugin monitor, Progressive progress) {
- // add some time between 2 calls to initialize()
- try {
- Thread.sleep(1000); // sleep for 1s
- } catch (Exception e){}
-
- try {
- fs = FileSystem.getLocal(conf);
-
- Path initPath = getInitFilePath(getIdentifier());
- fs.delete(initPath, false); // delete the old file
- fs.create(initPath).close(); // create a new one
-
- touchPath = getFilePath(getIdentifier());
- fs.delete(touchPath, false);
- } catch (Exception e) {
-        // ignored; a failed initialization shows up later as a missing init file
- } finally {
- if (fs != null) {
- try {
- fs.deleteOnExit(tempDir);
- } catch (IOException ioe){}
- }
- }
- }
-
-    // test if the emulation framework successfully initialized this plugin
- static long testInitialization(String id, Configuration conf)
- throws IOException {
- Path testPath = getInitFilePath(id);
- FileSystem fs = FileSystem.getLocal(conf);
- return fs.exists(testPath)
- ? fs.getFileStatus(testPath).getModificationTime()
- : 0;
- }
-
-    // test if the emulation framework successfully ran this plugin's emulate()
- static long testEmulation(String id, Configuration conf)
- throws IOException {
- Path testPath = getFilePath(id);
- FileSystem fs = FileSystem.getLocal(conf);
- return fs.exists(testPath)
- ? fs.getFileStatus(testPath).getModificationTime()
- : 0;
- }
- }
-
- /**
- * Test implementation of {@link ResourceUsageEmulatorPlugin} which creates
- * a file named 'others' in the test directory.
- */
- static class TestOthers extends TestResourceUsageEmulatorPlugin {
- static final String ID = "others";
-
- @Override
- protected String getIdentifier() {
- return ID;
- }
- }
-
- /**
- * Test implementation of {@link ResourceUsageEmulatorPlugin} which creates
- * a file named 'cpu' in the test directory.
- */
- static class TestCpu extends TestResourceUsageEmulatorPlugin {
- static final String ID = "cpu";
-
- @Override
- protected String getIdentifier() {
- return ID;
- }
- }
-
- /**
- * Test {@link ResourceUsageMatcher}.
- */
- @Test
- public void testResourceUsageMatcher() throws Exception {
- ResourceUsageMatcher matcher = new ResourceUsageMatcher();
- Configuration conf = new Configuration();
- conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
- TestResourceUsageEmulatorPlugin.class,
- ResourceUsageEmulatorPlugin.class);
- long currentTime = System.currentTimeMillis();
-
- matcher.configure(conf, null, null, null);
-
- matcher.matchResourceUsage();
-
- String id = TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
- long result =
- TestResourceUsageEmulatorPlugin.testInitialization(id, conf);
- assertTrue("Resource usage matcher failed to initialize the configured"
- + " plugin", result > currentTime);
- result = TestResourceUsageEmulatorPlugin.testEmulation(id, conf);
- assertTrue("Resource usage matcher failed to load and emulate the"
- + " configured plugin", result > currentTime);
-
- // test plugin order to first emulate cpu and then others
- conf.setStrings(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
- TestCpu.class.getName() + "," + TestOthers.class.getName());
-
- matcher.configure(conf, null, null, null);
-
- // test the initialization order
- long time1 =
- TestResourceUsageEmulatorPlugin.testInitialization(TestCpu.ID, conf);
- long time2 =
- TestResourceUsageEmulatorPlugin.testInitialization(TestOthers.ID,
- conf);
- assertTrue("Resource usage matcher failed to initialize the configured"
- + " plugins in order", time1 < time2);
-
- matcher.matchResourceUsage();
-
-    // Note that the cpu usage emulator plugin is configured first and then
-    // the others plugin, so emulation should have happened in that order too.
-    time1 =
-        TestResourceUsageEmulatorPlugin.testEmulation(TestCpu.ID, conf);
-    time2 =
-        TestResourceUsageEmulatorPlugin.testEmulation(TestOthers.ID,
-                                                      conf);
-    assertTrue("Resource usage matcher failed to emulate the configured"
-               + " plugins in order", time1 < time2);
- }
-
- /**
- * Fakes the cumulative usage using {@link FakeCpuUsageEmulatorCore}.
- */
- static class FakeResourceUsageMonitor extends DummyResourceCalculatorPlugin {
- private FakeCpuUsageEmulatorCore core;
-
- public FakeResourceUsageMonitor(FakeCpuUsageEmulatorCore core) {
- this.core = core;
- }
-
- /**
- * A dummy CPU usage monitor. Every call to
- * {@link ResourceCalculatorPlugin#getCumulativeCpuTime()} will return the
-     * value of {@link FakeCpuUsageEmulatorCore#getCpuUsage()}.
- */
- @Override
- public long getCumulativeCpuTime() {
- return core.getCpuUsage();
- }
-
- /**
- * Returns a {@link ProcResourceValues} with cumulative cpu usage
- * computed using {@link #getCumulativeCpuTime()}.
- */
- @Override
- public ProcResourceValues getProcResourceValues() {
- long usageValue = getCumulativeCpuTime();
- return new ProcResourceValues(usageValue, -1, -1);
- }
- }
-
- /**
- * A dummy {@link Progressive} implementation that allows users to set the
- * progress for testing. The {@link Progressive#getProgress()} call will
- * return the last progress value set using
- * {@link FakeProgressive#setProgress(float)}.
- */
- static class FakeProgressive implements Progressive {
- private float progress = 0F;
- @Override
- public float getProgress() {
- return progress;
- }
-
- void setProgress(float progress) {
- this.progress = progress;
- }
- }
-
- /**
- * A dummy reporter for {@link LoadJob.ResourceUsageMatcherRunner}.
- */
- private static class DummyReporter extends StatusReporter {
- private Progressive progress;
-
- DummyReporter(Progressive progress) {
- this.progress = progress;
- }
-
- @Override
- public org.apache.hadoop.mapreduce.Counter getCounter(Enum<?> name) {
- return null;
- }
-
- @Override
- public org.apache.hadoop.mapreduce.Counter getCounter(String group,
- String name) {
- return null;
- }
-
- @Override
- public void progress() {
- }
-
- @Override
- public float getProgress() {
- return progress.getProgress();
- }
-
- @Override
- public void setStatus(String status) {
- }
- }
-
- // Extends ResourceUsageMatcherRunner for testing.
- @SuppressWarnings("unchecked")
- private static class FakeResourceUsageMatcherRunner
- extends ResourceUsageMatcherRunner {
- FakeResourceUsageMatcherRunner(TaskInputOutputContext context,
- ResourceUsageMetrics metrics) {
- super(context, metrics);
- }
-
- // test ResourceUsageMatcherRunner
- void test() throws Exception {
- super.match();
- }
- }
-
- /**
- * Test {@link LoadJob.ResourceUsageMatcherRunner}.
- */
- @Test
- @SuppressWarnings("unchecked")
- public void testResourceUsageMatcherRunner() throws Exception {
- Configuration conf = new Configuration();
- FakeProgressive progress = new FakeProgressive();
-
- // set the resource calculator plugin
- conf.setClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
- DummyResourceCalculatorPlugin.class,
- ResourceCalculatorPlugin.class);
- // set the resources
- // set the resource implementation class
- conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
- TestResourceUsageEmulatorPlugin.class,
- ResourceUsageEmulatorPlugin.class);
-
- long currentTime = System.currentTimeMillis();
-
- // initialize the matcher class
- TaskAttemptID id = new TaskAttemptID("test", 1, TaskType.MAP, 1, 1);
- StatusReporter reporter = new DummyReporter(progress);
- TaskInputOutputContext context =
- new MapContextImpl(conf, id, null, null, null, reporter, null);
- FakeResourceUsageMatcherRunner matcher =
- new FakeResourceUsageMatcherRunner(context, null);
-
- // check if the matcher initialized the plugin
- String identifier = TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
- long initTime =
- TestResourceUsageEmulatorPlugin.testInitialization(identifier, conf);
- assertTrue("ResourceUsageMatcherRunner failed to initialize the"
- + " configured plugin", initTime > currentTime);
-
- // check the progress
- assertEquals("Progress mismatch in ResourceUsageMatcherRunner",
- 0, progress.getProgress(), 0D);
-
- // call match() and check progress
- progress.setProgress(0.01f);
- currentTime = System.currentTimeMillis();
- matcher.test();
- long emulateTime =
- TestResourceUsageEmulatorPlugin.testEmulation(identifier, conf);
- assertTrue("ProgressBasedResourceUsageMatcher failed to load and emulate"
- + " the configured plugin", emulateTime > currentTime);
- }
-
- /**
- * Test {@link CumulativeCpuUsageEmulatorPlugin}'s core CPU usage emulation
- * engine.
- */
- @Test
- public void testCpuUsageEmulator() throws IOException {
- // test CpuUsageEmulator calibration with fake resource calculator plugin
- long target = 100000L; // 100 secs
- int unitUsage = 50;
- FakeCpuUsageEmulatorCore fakeCpuEmulator = new FakeCpuUsageEmulatorCore();
- fakeCpuEmulator.setUnitUsage(unitUsage);
- FakeResourceUsageMonitor fakeMonitor =
- new FakeResourceUsageMonitor(fakeCpuEmulator);
-
- // calibrate for 100ms
- fakeCpuEmulator.calibrate(fakeMonitor, target);
-
- // by default, CpuUsageEmulator.calibrate() will consume 100ms of CPU usage
- assertEquals("Fake calibration failed",
- 100, fakeMonitor.getCumulativeCpuTime());
- assertEquals("Fake calibration failed",
- 100, fakeCpuEmulator.getCpuUsage());
- // by default, CpuUsageEmulator.performUnitComputation() will be called
- // twice
- assertEquals("Fake calibration failed",
- 2, fakeCpuEmulator.getNumCalls());
- }
-
- /**
- * This is a dummy class that fakes CPU usage.
- */
- private static class FakeCpuUsageEmulatorCore
- extends DefaultCpuUsageEmulator {
- private int numCalls = 0;
- private int unitUsage = 1;
- private int cpuUsage = 0;
-
- @Override
- protected void performUnitComputation() {
- ++numCalls;
- cpuUsage += unitUsage;
- }
-
- int getNumCalls() {
- return numCalls;
- }
-
- int getCpuUsage() {
- return cpuUsage;
- }
-
- void reset() {
- numCalls = 0;
- cpuUsage = 0;
- }
-
- void setUnitUsage(int unitUsage) {
- this.unitUsage = unitUsage;
- }
- }
-
- // Creates a ResourceUsageMetrics object from the target usage
- static ResourceUsageMetrics createMetrics(long target) {
- ResourceUsageMetrics metrics = new ResourceUsageMetrics();
- metrics.setCumulativeCpuUsage(target);
- metrics.setVirtualMemoryUsage(target);
- metrics.setPhysicalMemoryUsage(target);
- metrics.setHeapUsage(target);
- return metrics;
- }
-
- /**
- * Test {@link CumulativeCpuUsageEmulatorPlugin}.
- */
- @Test
- public void testCumulativeCpuUsageEmulatorPlugin() throws Exception {
- Configuration conf = new Configuration();
- long targetCpuUsage = 1000L;
- int unitCpuUsage = 50;
-
- // fake progress indicator
- FakeProgressive fakeProgress = new FakeProgressive();
-
- // fake cpu usage generator
- FakeCpuUsageEmulatorCore fakeCore = new FakeCpuUsageEmulatorCore();
- fakeCore.setUnitUsage(unitCpuUsage);
-
- // a cumulative cpu usage emulator with fake core
- CumulativeCpuUsageEmulatorPlugin cpuPlugin =
- new CumulativeCpuUsageEmulatorPlugin(fakeCore);
-
- // test with invalid or missing resource usage value
- ResourceUsageMetrics invalidUsage = createMetrics(0);
- cpuPlugin.initialize(conf, invalidUsage, null, null);
-
-    // test that a disabled cpu emulation plugin's emulate() call is a
-    // no-operation (the plugin disables itself on an invalid usage value)
- int numCallsPre = fakeCore.getNumCalls();
- long cpuUsagePre = fakeCore.getCpuUsage();
- cpuPlugin.emulate();
- int numCallsPost = fakeCore.getNumCalls();
- long cpuUsagePost = fakeCore.getCpuUsage();
-
-    // test that no calls were made to the cpu usage emulator core
- assertEquals("Disabled cumulative CPU usage emulation plugin works!",
- numCallsPre, numCallsPost);
-
-    // test that the cpu usage emulator core's usage value is unchanged
- assertEquals("Disabled cumulative CPU usage emulation plugin works!",
- cpuUsagePre, cpuUsagePost);
-
- // test with valid resource usage value
- ResourceUsageMetrics metrics = createMetrics(targetCpuUsage);
-
- // fake monitor
- ResourceCalculatorPlugin monitor = new FakeResourceUsageMonitor(fakeCore);
-
- // test with default emulation interval
- testEmulationAccuracy(conf, fakeCore, monitor, metrics, cpuPlugin,
- targetCpuUsage, targetCpuUsage / unitCpuUsage);
-
- // test with custom value for emulation interval of 20%
- conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,
- 0.2F);
- testEmulationAccuracy(conf, fakeCore, monitor, metrics, cpuPlugin,
- targetCpuUsage, targetCpuUsage / unitCpuUsage);
-
- // test if emulation interval boundary is respected (unit usage = 1)
- // test the case where the current progress is less than threshold
- fakeProgress = new FakeProgressive(); // initialize
- fakeCore.reset();
- fakeCore.setUnitUsage(1);
- conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,
- 0.25F);
- cpuPlugin.initialize(conf, metrics, monitor, fakeProgress);
- // take a snapshot after the initialization
- long initCpuUsage = monitor.getCumulativeCpuTime();
- long initNumCalls = fakeCore.getNumCalls();
- // test with 0 progress
- testEmulationBoundary(0F, fakeCore, fakeProgress, cpuPlugin, initCpuUsage,
- initNumCalls, "[no-op, 0 progress]");
- // test with 24% progress
- testEmulationBoundary(0.24F, fakeCore, fakeProgress, cpuPlugin,
- initCpuUsage, initNumCalls, "[no-op, 24% progress]");
- // test with 25% progress
- // target = 1000ms, target emulation at 25% = 250ms,
-    // weighted target = 1000 * 0.25^4 (we are using progress^4 as the weight)
- // ~ 4
- // but current usage = init-usage = 100, hence expected = 100
- testEmulationBoundary(0.25F, fakeCore, fakeProgress, cpuPlugin,
- initCpuUsage, initNumCalls, "[op, 25% progress]");
-
- // test with 80% progress
- // target = 1000ms, target emulation at 80% = 800ms,
-    // weighted target = 1000 * 0.80^4 (we are using progress^4 as the weight)
- // ~ 410
- // current-usage = init-usage = 100, hence expected-usage = 410
- testEmulationBoundary(0.80F, fakeCore, fakeProgress, cpuPlugin, 410, 410,
- "[op, 80% progress]");
-
- // now test if the final call with 100% progress ramps up the CPU usage
- testEmulationBoundary(1F, fakeCore, fakeProgress, cpuPlugin, targetCpuUsage,
- targetCpuUsage, "[op, 100% progress]");
-
- // test if emulation interval boundary is respected (unit usage = 50)
- // test the case where the current progress is less than threshold
- fakeProgress = new FakeProgressive(); // initialize
- fakeCore.reset();
- fakeCore.setUnitUsage(unitCpuUsage);
- conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,
- 0.40F);
- cpuPlugin.initialize(conf, metrics, monitor, fakeProgress);
- // take a snapshot after the initialization
- initCpuUsage = monitor.getCumulativeCpuTime();
- initNumCalls = fakeCore.getNumCalls();
- // test with 0 progress
- testEmulationBoundary(0F, fakeCore, fakeProgress, cpuPlugin, initCpuUsage,
- initNumCalls, "[no-op, 0 progress]");
- // test with 39% progress
- testEmulationBoundary(0.39F, fakeCore, fakeProgress, cpuPlugin,
- initCpuUsage, initNumCalls, "[no-op, 39% progress]");
- // test with 40% progress
-    // target = 1000ms, target emulation at 40% = 400ms,
-    // weighted target = 1000 * 0.40^4 (we are using progress^4 as the weight)
- // ~ 26
- // current-usage = init-usage = 100, hence expected-usage = 100
- testEmulationBoundary(0.40F, fakeCore, fakeProgress, cpuPlugin,
- initCpuUsage, initNumCalls, "[op, 40% progress]");
-
- // test with 90% progress
- // target = 1000ms, target emulation at 90% = 900ms,
-    // weighted target = 1000 * 0.90^4 (we are using progress^4 as the weight)
- // ~ 657
- // current-usage = init-usage = 100, hence expected-usage = 657 but
- // the fake-core increases in steps of 50, hence final target = 700
- testEmulationBoundary(0.90F, fakeCore, fakeProgress, cpuPlugin, 700,
- 700 / unitCpuUsage, "[op, 90% progress]");
-
- // now test if the final call with 100% progress ramps up the CPU usage
- testEmulationBoundary(1F, fakeCore, fakeProgress, cpuPlugin, targetCpuUsage,
- targetCpuUsage / unitCpuUsage, "[op, 100% progress]");
- }
-
-  // test whether the CPU usage emulator achieves the desired target using the
-  // expected number of calls to the underlying core engine.
- private static void testEmulationAccuracy(Configuration conf,
- FakeCpuUsageEmulatorCore fakeCore,
- ResourceCalculatorPlugin monitor,
- ResourceUsageMetrics metrics,
- CumulativeCpuUsageEmulatorPlugin cpuPlugin,
- long expectedTotalCpuUsage, long expectedTotalNumCalls)
- throws Exception {
- FakeProgressive fakeProgress = new FakeProgressive();
- fakeCore.reset();
- cpuPlugin.initialize(conf, metrics, monitor, fakeProgress);
- int numLoops = 0;
- while (fakeProgress.getProgress() < 1) {
- ++numLoops;
- float progress = (float)numLoops / 100;
- fakeProgress.setProgress(progress);
- cpuPlugin.emulate();
- }
-
- // test if the resource plugin shows the expected invocations
- assertEquals("Cumulative cpu usage emulator plugin failed (num calls)!",
- expectedTotalNumCalls, fakeCore.getNumCalls(), 0L);
- // test if the resource plugin shows the expected usage
- assertEquals("Cumulative cpu usage emulator plugin failed (total usage)!",
- expectedTotalCpuUsage, fakeCore.getCpuUsage(), 0L);
- }
-
- // tests if the CPU usage emulation plugin emulates only at the expected
- // progress gaps
- private static void testEmulationBoundary(float progress,
- FakeCpuUsageEmulatorCore fakeCore, FakeProgressive fakeProgress,
- CumulativeCpuUsageEmulatorPlugin cpuPlugin, long expectedTotalCpuUsage,
- long expectedTotalNumCalls, String info) throws Exception {
- fakeProgress.setProgress(progress);
- cpuPlugin.emulate();
-
- assertEquals("Emulation interval test for cpu usage failed " + info + "!",
- expectedTotalCpuUsage, fakeCore.getCpuUsage(), 0L);
- assertEquals("Emulation interval test for num calls failed " + info + "!",
- expectedTotalNumCalls, fakeCore.getNumCalls(), 0L);
- }
-}
\ No newline at end of file
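The boundary cases above pin down the plugin's emulation policy: work is performed only when task progress crosses the configured CPU_EMULATION_PROGRESS_INTERVAL boundary, and the CPU to have consumed by a given progress point is the target usage weighted by progress^4. The following is a minimal standalone sketch of that policy, for orientation only; the class and method names here are invented and are not the actual CumulativeCpuUsageEmulatorPlugin internals.

// A sketch only; the real plugin is
// o.a.h.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin.
public class WeightedCpuEmulationSketch {
  private final long targetCpuUsage; // from ResourceUsageMetrics
  private final float interval;      // CPU_EMULATION_PROGRESS_INTERVAL
  private float lastBoundary = 0F;   // last progress boundary that did work

  public WeightedCpuEmulationSketch(long targetCpuUsage, float interval) {
    this.targetCpuUsage = targetCpuUsage;
    this.interval = interval;
  }

  // CPU usage that should have been consumed by this progress point;
  // e.g. target = 1000ms at 80% progress => 1000 * 0.8^4, about 410ms.
  long weightedTarget(float progress) {
    return (long) (targetCpuUsage * Math.pow(progress, 4));
  }

  // Emulate only at interval boundaries, mirroring the
  // "[no-op, 24% progress]" vs "[op, 25% progress]" cases above.
  boolean shouldEmulate(float progress) {
    if (progress >= 1F || progress - lastBoundary >= interval) {
      lastBoundary = (float) Math.floor(progress / interval) * interval;
      return true;
    }
    return false;
  }
}

With target = 1000 and interval = 0.25F this reproduces the expectations in the tests: shouldEmulate(0.24F) is false, shouldEmulate(0.25F) is true, and weightedTarget(0.80F) is about 410.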
diff --git a/mapreduce/src/contrib/index/ivy.xml b/mapreduce/src/contrib/index/ivy.xml
deleted file mode 100644
index e17a683..0000000
--- a/mapreduce/src/contrib/index/ivy.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description>
- Apache Hadoop
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private"
- extends="runtime"
- description="artifacts needed to compile/test the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common-test" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs" rev="${hadoop-hdfs.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs-test" rev="${hadoop-hdfs.version}" conf="common->default"/>
- <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->default"/>
- <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
- <dependency org="org.apache.lucene" name="lucene-core" rev="${lucene-core.version}" conf="common->default"/>
- <dependency org="junit" name="junit" rev="${junit.version}" conf="common->default"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/mumak/ivy.xml b/mapreduce/src/contrib/mumak/ivy.xml
deleted file mode 100644
index fdc95e3..0000000
--- a/mapreduce/src/contrib/mumak/ivy.xml
+++ /dev/null
@@ -1,137 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <description>
- Mumak
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private"
- extends="runtime"
- description="artifacts needed to compile/test the application"/>
- <conf name="test" visibility="private" extends="master,common,runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop"
- name="hadoop-annotations"
- rev="${hadoop-common.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common"
- rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common-test"
- rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs"
- rev="${hadoop-hdfs.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
- rev="${hadoop-hdfs.version}" conf="test->default"/>
- <dependency org="commons-logging"
- name="commons-logging"
- rev="${commons-logging.version}"
- conf="common->default"/>
- <dependency org="log4j"
- name="log4j"
- rev="${log4j.version}"
- conf="common->master"/>
- <dependency org="org.codehaus.jackson"
- name="jackson-mapper-asl"
- rev="${jackson.version}"
- conf="common->default"/>
- <dependency org="org.codehaus.jackson"
- name="jackson-core-asl"
- rev="${jackson.version}"
- conf="common->default"/>
- <dependency org="junit"
- name="junit"
- rev="${junit.version}"
- conf="common->default"/>
- <dependency org="org.aspectj"
- name="aspectjrt"
- rev="${aspectj.version}"
- conf="common->default">
- </dependency>
- <dependency org="org.aspectj"
- name="aspectjtools"
- rev="${aspectj.version}"
- conf="common->default">
- </dependency>
- <!-- necessary for Mini*Clusters -->
- <dependency org="commons-httpclient"
- name="commons-httpclient"
- rev="${commons-httpclient.version}"
- conf="common->master"/>
- <dependency org="commons-codec"
- name="commons-codec"
- rev="${commons-codec.version}"
- conf="common->default"/>
- <dependency org="commons-net"
- name="commons-net"
- rev="${commons-net.version}"
- conf="common->default"/>
- <dependency org="org.mortbay.jetty"
- name="jetty"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jetty-util"
- rev="${jetty-util.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jsp-api-2.1"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jsp-2.1"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="servlet-api-2.5"
- rev="${servlet-api-2.5.version}"
- conf="common->master"/>
- <dependency org="commons-cli"
- name="commons-cli"
- rev="${commons-cli.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="avro"
- rev="${avro.version}"
- conf="common->default">
- <exclude module="ant"/>
- <exclude module="jetty"/>
- <exclude module="slf4j-simple"/>
- </dependency>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorJobTracker.java b/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorJobTracker.java
deleted file mode 100644
index df84fad..0000000
--- a/mapreduce/src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorJobTracker.java
+++ /dev/null
@@ -1,484 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import junit.framework.Assert;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.mapred.TaskStatus.State;
-import org.apache.hadoop.mapred.TaskStatus.Phase;
-import org.apache.hadoop.mapreduce.ClusterMetrics;
-import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.JobPriority;
-import org.apache.hadoop.mapreduce.JobStatus;
-import org.apache.hadoop.mapreduce.QueueAclsInfo;
-import org.apache.hadoop.mapreduce.QueueInfo;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.mapreduce.TaskReport;
-import org.apache.hadoop.mapreduce.TaskTrackerInfo;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-import org.apache.hadoop.tools.rumen.TaskInfo;
-import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo;
-import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo;
-import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.mapreduce.split.JobSplit.*;
-//
-// Mock jobtracker class that checks heartbeat() parameters and
-// sends responses based on a prepopulated table
-//
-public class MockSimulatorJobTracker implements InterTrackerProtocol,
- ClientProtocol {
- private final long simulationStartTime;
- private final int heartbeatInterval;
-
- // Helper table, used iff checkHeartbeats == true
- // Contains the expected task tracker status report at time t for all task
- // trackers identified by their name and the heartbeat response to send
- private SortedMap<Long, TreeMap<String, HeartbeatHelper>> heartbeats =
- new TreeMap<Long, TreeMap<String, HeartbeatHelper>>();
- private final boolean checkHeartbeats;
- private int jobId = 0;
-
- static final Log LOG = LogFactory.getLog(MockSimulatorJobTracker.class);
-
- public MockSimulatorJobTracker(long simulationStartTime,
- int heartbeatInterval,
- boolean checkHeartbeats) {
- this.simulationStartTime = simulationStartTime;
- this.heartbeatInterval = heartbeatInterval;
- this.checkHeartbeats = checkHeartbeats;
- }
-
- @Override
- public JobID getNewJobID() throws IOException {
- return new JobID("mockJT", jobId++);
- }
-
- @Override
- public JobStatus submitJob(
- JobID jobId, String jobSubmitDir, Credentials ts) throws IOException {
- JobStatus status = new JobStatus(jobId, 0.0f, 0.0f, 0.0f, 0.0f,
- JobStatus.State.RUNNING, JobPriority.NORMAL, "", "", "", "");
- return status;
- }
-
- @Override
- public HeartbeatResponse heartbeat(TaskTrackerStatus status,
- boolean restarted, boolean initialContact, boolean acceptNewTasks,
- short responseId) throws IOException {
- if (!(status instanceof SimulatorTaskTrackerStatus)) {
- throw new IllegalArgumentException(
- "Expecting SimulatorTaskTrackerStatus, actual status type "
- + status.getClass());
- }
- SimulatorTaskTrackerStatus trackerStatus =
- (SimulatorTaskTrackerStatus)status;
- long now = trackerStatus.getCurrentSimulationTime();
- String trackerName = status.getTrackerName();
-
- LOG.debug("Received heartbeat() from trackerName=" + trackerName +
- ", now=" + now);
-
- HeartbeatResponse response = new HeartbeatResponse();
- response.setHeartbeatInterval(heartbeatInterval);
- response.setActions(new TaskTrackerAction[0]);
-
- if (checkHeartbeats) {
- Assert.assertFalse("No more heartbeats were expected ", heartbeats.isEmpty());
- long nextToCheck = heartbeats.firstKey();
- // Missing heartbeat check
- Assert.assertTrue(nextToCheck <= now);
- if (nextToCheck < now) {
- LOG.debug("Simulation time progressed, last checked heartbeat at=" +
- nextToCheck + ", now=" + now + ". Checking if no " +
- "required heartbeats were missed in the past");
- SortedMap<String, HeartbeatHelper> previousHeartbeats =
- heartbeats.get(nextToCheck);
- Assert.assertNotNull(previousHeartbeats);
- Assert.assertTrue(previousHeartbeats.isEmpty());
- heartbeats.remove(nextToCheck);
- nextToCheck = heartbeats.firstKey();
- }
- Assert.assertEquals("Heartbeat at the wrong time", nextToCheck, now);
-
- SortedMap<String, HeartbeatHelper> currentHeartbeats =
- heartbeats.get(now);
- HeartbeatHelper currentHeartbeat = currentHeartbeats.get(trackerName);
- Assert.assertNotNull("Unknown task tracker name=" + trackerName,
- currentHeartbeat);
- currentHeartbeats.remove(trackerName);
-
- currentHeartbeat.checkHeartbeatParameters(status, acceptNewTasks);
-
- response.setActions(currentHeartbeat.getTaskTrackerActions());
- }
- return response;
- }
-
- //
- // Populates the mock jobtracker's helper & checker table with expected
- // empty reports from the task trackers and empty task actions to perform
- //
- public void expectEmptyHeartbeats(String taskTrackerName,
- int numHeartbeats) {
- long simulationTime = simulationStartTime;
- for (int i=0; i<numHeartbeats; i++) {
- TreeMap<String, HeartbeatHelper> hb = heartbeats.get(simulationTime);
- if (hb == null) {
- hb = new TreeMap<String, HeartbeatHelper>();
- heartbeats.put(simulationTime, hb);
- }
- hb.put(taskTrackerName, new HeartbeatHelper());
- simulationTime += heartbeatInterval;
- }
- }
-
-  // Fills in all the expected and returned heartbeat parameters corresponding
-  // to running a map task on a task tracker.
-  // Use killHeartbeat < 0 if the task is not killed.
- public void runMapTask(String taskTrackerName, TaskAttemptID taskId,
- long mapStart, long mapRuntime, long killHeartbeat) {
- long mapDone = mapStart + mapRuntime;
- long mapEndHeartbeat = nextHeartbeat(mapDone);
- final boolean isKilled = (killHeartbeat>=0);
- if (isKilled) {
- mapEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
- }
-
- LOG.debug("mapStart=" + mapStart + ", mapDone=" + mapDone +
- ", mapEndHeartbeat=" + mapEndHeartbeat +
- ", killHeartbeat=" + killHeartbeat);
-
- final int numSlotsRequired = 1;
- org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
- org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
- Task task = new MapTask("dummyjobfile", taskIdOldApi, 0, new TaskSplitIndex(),
- numSlotsRequired);
- // all byte counters are 0
- TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
- MapTaskAttemptInfo taskAttemptInfo =
- new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);
- TaskTrackerAction action =
- new SimulatorLaunchTaskAction(task, taskAttemptInfo);
- heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
- if (isKilled) {
- action = new KillTaskAction(taskIdOldApi);
- heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
- action);
- }
-
- for(long simulationTime = mapStart + heartbeatInterval;
- simulationTime <= mapEndHeartbeat;
- simulationTime += heartbeatInterval) {
- State state = simulationTime < mapEndHeartbeat ?
- State.RUNNING : State.SUCCEEDED;
- if (simulationTime == mapEndHeartbeat && isKilled) {
- state = State.KILLED;
- }
- MapTaskStatus mapStatus = new MapTaskStatus(
- task.getTaskID(), 0.0f, 0, state, "", "", null, Phase.MAP, null);
- heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
- mapStatus);
- }
- }
-
-  // Fills in all the expected and returned heartbeat parameters corresponding
-  // to running a reduce task on a task tracker.
-  // Use killHeartbeat < 0 if the task is not killed.
- public void runReduceTask(String taskTrackerName, TaskAttemptID taskId,
- long reduceStart, long mapDoneDelay,
- long reduceRuntime, long killHeartbeat) {
- long mapDone = nextHeartbeat(reduceStart + mapDoneDelay);
- long reduceDone = mapDone + reduceRuntime;
- long reduceEndHeartbeat = nextHeartbeat(reduceDone);
- final boolean isKilled = (killHeartbeat>=0);
- if (isKilled) {
- reduceEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
- }
-
- LOG.debug("reduceStart=" + reduceStart + ", mapDone=" + mapDone +
- ", reduceDone=" + reduceDone +
- ", reduceEndHeartbeat=" + reduceEndHeartbeat +
- ", killHeartbeat=" + killHeartbeat);
-
- final int numSlotsRequired = 1;
- org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
- org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
- Task task = new ReduceTask("dummyjobfile", taskIdOldApi, 0, 0,
- numSlotsRequired);
- // all byte counters are 0
- TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
- ReduceTaskAttemptInfo taskAttemptInfo =
- new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 0, 0,
- reduceRuntime);
- TaskTrackerAction action =
- new SimulatorLaunchTaskAction(task, taskAttemptInfo);
- heartbeats.get(reduceStart).get(taskTrackerName).addTaskTrackerAction(
- action);
- if (!isKilled || mapDone < killHeartbeat) {
- action = new AllMapsCompletedTaskAction(task.getTaskID());
- heartbeats.get(mapDone).get(taskTrackerName).addTaskTrackerAction(
- action);
- }
- if (isKilled) {
- action = new KillTaskAction(taskIdOldApi);
- heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
- action);
- }
-
- for(long simulationTime = reduceStart + heartbeatInterval;
- simulationTime <= reduceEndHeartbeat;
- simulationTime += heartbeatInterval) {
- State state = simulationTime < reduceEndHeartbeat ?
- State.RUNNING : State.SUCCEEDED;
- if (simulationTime == reduceEndHeartbeat && isKilled) {
- state = State.KILLED;
- }
-      // mapDone is when the all-maps-done event is delivered
- Phase phase = simulationTime <= mapDone ? Phase.SHUFFLE : Phase.REDUCE;
- ReduceTaskStatus reduceStatus = new ReduceTaskStatus(
- task.getTaskID(), 0.0f, 0, state, "", "", null, phase, null);
- heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
- reduceStatus);
- }
- }
-
- // Should be called at the end of the simulation: Mock JT should have
- // consumed all entries from the heartbeats table by that time
- public void checkMissingHeartbeats() {
- Assert.assertEquals(1, heartbeats.size());
- long lastHeartbeat = heartbeats.firstKey();
- Assert.assertTrue("Missing heartbeats, last heartbeat=" + lastHeartbeat,
- heartbeats.get(lastHeartbeat).isEmpty());
- }
-
- // rounds up to the next heartbeat time
- public long nextHeartbeat(long time) {
- long numHeartbeats = (long)Math.ceil(
- (time - simulationStartTime)/(double)heartbeatInterval);
- return simulationStartTime + numHeartbeats * heartbeatInterval;
- }
-
- // Rest of InterTrackerProtocol follows, unused in simulation
- @Override
- public String getFilesystemName() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void reportTaskTrackerError(String taskTracker,
- String errorClass,
- String errorMessage) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid,
- int fromEventId, int maxEvents) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getSystemDir() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getStagingAreaDir() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getBuildVersion() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public long getProtocolVersion(String protocol, long clientVersion) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public TaskCompletionEvent[] getTaskCompletionEvents(
- org.apache.hadoop.mapred.JobID jobid, int fromEventId, int maxEvents)
- throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public TaskTrackerInfo[] getActiveTrackers() throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public JobStatus[] getAllJobs() throws IOException, InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public QueueInfo[] getChildQueues(String queueName) throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public ClusterMetrics getClusterMetrics() throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Counters getJobCounters(JobID jobid) throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getJobHistoryDir() throws IOException, InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public JobStatus getJobStatus(JobID jobid) throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public org.apache.hadoop.mapreduce.server.jobtracker.State getJobTrackerState()
- throws IOException, InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public JobTrackerStatus getJobTrackerStatus() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public QueueInfo getQueue(String queueName) throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
-
- }
-
- @Override
- public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
-
- }
-
- @Override
- public QueueInfo[] getQueues() throws IOException, InterruptedException {
- throw new UnsupportedOperationException();
-
- }
-
- @Override
- public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
- throw new UnsupportedOperationException();
-
- }
-
- @Override
- public AccessControlList getQueueAdmins(String queueName) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String[] getTaskDiagnostics(TaskAttemptID taskId) throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public TaskReport[] getTaskReports(JobID jobid, TaskType type)
- throws IOException, InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public long getTaskTrackerExpiryInterval() throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void killJob(JobID jobid) throws IOException, InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public boolean killTask(TaskAttemptID taskId, boolean shouldFail)
- throws IOException, InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void setJobPriority(JobID jobid, String priority) throws IOException,
- InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
- ) throws IOException,
- InterruptedException {
- }
-
- @Override
- public Token<DelegationTokenIdentifier>
- getDelegationToken(Text renewer) throws IOException, InterruptedException {
- return null;
- }
-
- @Override
- public long renewDelegationToken(Token<DelegationTokenIdentifier> token
- ) throws IOException,InterruptedException{
- return 0;
- }
-
- @Override
- public ProtocolSignature getProtocolSignature(String protocol,
- long clientVersion, int clientMethodsHash) throws IOException {
- return ProtocolSignature.getProtocolSignature(
- this, protocol, clientVersion, clientMethodsHash);
- }
-}
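For orientation, here is a hedged sketch of how a mumak test would drive this mock; the tracker name, job identifiers, and timings are made up, but the method signatures are the ones in the class above. Note how nextHeartbeat() rounding shapes the schedule: with a 10s heartbeat interval, a 25s map task started at t=0 reports completion on the heartbeat at t=30000.

import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;

public class MockSimulatorJobTrackerSketch {
  public static void main(String[] args) {
    long start = 0L;      // simulation start time
    int interval = 10000; // heartbeat every 10 simulated seconds

    MockSimulatorJobTracker jt =
        new MockSimulatorJobTracker(start, interval, true);

    // Populate heartbeat slots for t = 0, 10000, 20000, 30000; runMapTask()
    // below fills these slots with the launch action and task reports.
    jt.expectEmptyHeartbeats("tracker_host1:0", 4);

    TaskAttemptID attempt = new TaskAttemptID("sim", 1, TaskType.MAP, 0, 0);
    // Map starts at t=0 and runs 25s; nextHeartbeat(25000) rounds up to 30000,
    // so SUCCEEDED is reported there. killHeartbeat = -1 means "not killed".
    jt.runMapTask("tracker_host1:0", attempt, 0L, 25000L, -1L);

    // ... drive a SimulatorTaskTracker against jt here ...

    // At the end of the simulation every expected heartbeat must have arrived.
    jt.checkMissingHeartbeats();
  }
}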
diff --git a/mapreduce/src/contrib/raid/ivy.xml b/mapreduce/src/contrib/raid/ivy.xml
deleted file mode 100644
index 2fed1c3..0000000
--- a/mapreduce/src/contrib/raid/ivy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <description>Rumen</description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private" extends="runtime"
- description="artifacts needed to compile/test the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="hadoop-common"
- rev="${hadoop-common.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="hadoop-common-test"
- rev="${hadoop-common.version}"
- conf="test->default"/>
- <dependency org="org.apache.hadoop"
- name="hadoop-hdfs"
- rev="${hadoop-hdfs.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="hadoop-hdfs-test"
- rev="${hadoop-hdfs.version}"
- conf="test->default"/>
- <dependency org="commons-logging"
- name="commons-logging"
- rev="${commons-logging.version}"
- conf="common->default"/>
- <dependency org="log4j"
- name="log4j"
- rev="${log4j.version}"
- conf="common->master"/>
- <dependency org="junit"
- name="junit"
- rev="${junit.version}"
- conf="common->default"/>
-
- <!-- necessary for Mini*Clusters -->
- <dependency org="commons-httpclient"
- name="commons-httpclient"
- rev="${commons-httpclient.version}"
- conf="common->master"/>
- <dependency org="commons-codec"
- name="commons-codec"
- rev="${commons-codec.version}"
- conf="common->default"/>
- <dependency org="commons-net"
- name="commons-net"
- rev="${commons-net.version}"
- conf="common->default"/>
- <dependency org="org.mortbay.jetty"
- name="jetty"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jetty-util"
- rev="${jetty-util.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jsp-api-2.1"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="jsp-2.1"
- rev="${jetty.version}"
- conf="common->master"/>
- <dependency org="org.mortbay.jetty"
- name="servlet-api-2.5"
- rev="${servlet-api-2.5.version}"
- conf="common->master"/>
- <dependency org="commons-cli"
- name="commons-cli"
- rev="${commons-cli.version}"
- conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="avro"
- rev="${avro.version}"
- conf="common->default">
- <exclude module="ant"/>
- <exclude module="jetty"/>
- <exclude module="slf4j-simple"/>
- </dependency>
- <dependency org="org.codehaus.jackson"
- name="jackson-mapper-asl"
- rev="${jackson.version}"
- conf="common->default"/>
- <dependency org="org.codehaus.jackson"
- name="jackson-core-asl"
- rev="${jackson.version}"
- conf="common->default"/>
- <dependency org="com.thoughtworks.paranamer"
- name="paranamer"
- rev="${paranamer.version}"
- conf="common->default"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/streaming/ivy.xml b/mapreduce/src/contrib/streaming/ivy.xml
deleted file mode 100644
index 1d65522..0000000
--- a/mapreduce/src/contrib/streaming/ivy.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description>
- Apache Hadoop
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private"
- extends="runtime"
- description="artifacts needed to compile/test the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common"
- rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common-test"
- rev="${hadoop-common.version}" conf="test->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs"
- rev="${hadoop-hdfs.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs-test"
- rev="${hadoop-hdfs.version}" conf="test->default"/>
- <dependency org="commons-cli" name="commons-cli"
- rev="${commons-cli.version}" conf="common->default"/>
- <dependency org="commons-logging" name="commons-logging"
- rev="${commons-logging.version}" conf="common->default"/>
- <dependency org="junit" name="junit"
- rev="${junit.version}" conf="common->default"/>
- <dependency org="org.mortbay.jetty" name="jetty-util"
- rev="${jetty-util.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="jetty"
- rev="${jetty.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="jsp-api-2.1"
- rev="${jetty.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="jsp-2.1"
- rev="${jetty.version}" conf="common->master"/>
- <dependency org="org.mortbay.jetty" name="servlet-api-2.5"
- rev="${servlet-api-2.5.version}" conf="common->master"/>
- <dependency org="commons-httpclient" name="commons-httpclient"
- rev="${commons-httpclient.version}" conf="common->default"/>
- <dependency org="log4j" name="log4j"
- rev="${log4j.version}" conf="common->master"/>
- <dependency org="org.apache.hadoop" name="avro"
- rev="${avro.version}" conf="common->default">
- <exclude module="ant"/>
- <exclude module="jetty"/>
- <exclude module="slf4j-simple"/>
- </dependency>
- <dependency org="org.slf4j" name="slf4j-api"
- rev="${slf4j-api.version}" conf="common->master"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/vaidya/ivy.xml b/mapreduce/src/contrib/vaidya/ivy.xml
deleted file mode 100644
index 73043ef..0000000
--- a/mapreduce/src/contrib/vaidya/ivy.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description>
- Apache Hadoop
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private"
- extends="runtime"
- description="artifacts needed to compile the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
- <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->default"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/contrib/vertica/ivy.xml b/mapreduce/src/contrib/vertica/ivy.xml
deleted file mode 100644
index a65a7e0..0000000
--- a/mapreduce/src/contrib/vertica/ivy.xml
+++ /dev/null
@@ -1,76 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<ivy-module version="1.0">
- <info organisation="org.apache.hadoop" module="${ant.project.name}">
- <license name="Apache 2.0"/>
- <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
- <description>
- Apache Hadoop
- </description>
- </info>
- <configurations defaultconfmapping="default">
- <!--these match the Maven configurations-->
- <conf name="default" extends="master,runtime"/>
- <conf name="master" description="contains the artifact but no dependencies"/>
- <conf name="runtime" description="runtime but not the artifact" />
-
- <conf name="common" visibility="private"
- extends="runtime"
- description="artifacts needed to compile/test the application"/>
- <conf name="test" visibility="private" extends="runtime"/>
- </configurations>
-
- <publications>
- <!--get the artifact from our module name-->
- <artifact conf="master"/>
- </publications>
- <dependencies>
- <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
- <dependency org="org.apache.hadoop"
- name="hadoop-common"
- rev="${hadoop-common.version}"
- conf="common->default"/>
- <dependency org="commons-logging"
- name="commons-logging"
- rev="${commons-logging.version}"
- conf="common->default"/>
- <dependency org="commons-httpclient"
- name="commons-httpclient"
- rev="${commons-httpclient.version}"
- conf="common->default"/>
- <dependency org="commons-cli"
- name="commons-cli"
- rev="${commons-cli.version}"
- conf="common->default"/>
- <dependency org="junit"
- name="junit"
- rev="${junit.version}"
- conf="common->default"/>
- <dependency org="log4j"
- name="log4j"
- rev="${log4j.version}"
- conf="common->master"/>
-
- <!-- Exclusions for transitive dependencies pulled in by log4j -->
- <exclude org="com.sun.jdmk"/>
- <exclude org="com.sun.jmx"/>
- <exclude org="javax.jms"/>
- <exclude org="javax.mail"/>
-
- </dependencies>
-</ivy-module>
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/BackupStore.java b/mapreduce/src/java/org/apache/hadoop/mapred/BackupStore.java
deleted file mode 100644
index aba3c7d..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/BackupStore.java
+++ /dev/null
@@ -1,619 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.NoSuchElementException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalDirAllocator;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.mapred.IFile.Reader;
-import org.apache.hadoop.mapred.IFile.Writer;
-import org.apache.hadoop.mapred.Merger.Segment;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-
-/**
- * <code>BackupStore</code> is a utility class that is used to support
- * the mark-reset functionality of the values iterator
- *
- * <p>It has two caches - a memory cache and a file cache where values are
- * stored as they are iterated, after a mark. On reset, values are retrieved
- * from these caches. Framework moves from the memory cache to the
- * file cache when the memory cache becomes full.
- *
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class BackupStore<K,V> {
-
- private static final Log LOG = LogFactory.getLog(BackupStore.class.getName());
- private static final int MAX_VINT_SIZE = 9;
- private static final int EOF_MARKER_SIZE = 2 * MAX_VINT_SIZE;
- private final TaskAttemptID tid;
-
- private MemoryCache memCache;
- private FileCache fileCache;
-
- List<Segment<K,V>> segmentList = new LinkedList<Segment<K,V>>();
- private int readSegmentIndex = 0;
- private int firstSegmentOffset = 0;
-
- private int currentKVOffset = 0;
- private int nextKVOffset = -1;
-
- private DataInputBuffer currentKey = null;
- private DataInputBuffer currentValue = new DataInputBuffer();
- private DataInputBuffer currentDiskValue = new DataInputBuffer();
-
- private boolean hasMore = false;
- private boolean inReset = false;
- private boolean clearMarkFlag = false;
- private boolean lastSegmentEOF = false;
-
- public BackupStore(Configuration conf, TaskAttemptID taskid)
- throws IOException {
-
- final float bufferPercent =
- conf.getFloat(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT, 0f);
-
- if (bufferPercent > 1.0 || bufferPercent < 0.0) {
- throw new IOException("Invalid value for " +
- JobContext.REDUCE_MARKRESET_BUFFER_PERCENT + ": " + bufferPercent);
- }
-
- int maxSize = (int)Math.min(
- Runtime.getRuntime().maxMemory() * bufferPercent, Integer.MAX_VALUE);
-
- // Support an absolute size also.
- int tmp = conf.getInt(JobContext.REDUCE_MARKRESET_BUFFER_SIZE, 0);
- if (tmp > 0) {
- maxSize = tmp;
- }
-
- memCache = new MemoryCache(maxSize);
- fileCache = new FileCache(conf);
- tid = taskid;
-
- LOG.info("Created a new BackupStore with a memory of " + maxSize);
-
- }
-
- /**
- * Write the given K,V to the cache.
- * Write to memcache if space is available, else write to the filecache
- * @param key
- * @param value
- * @throws IOException
- */
- public void write(DataInputBuffer key, DataInputBuffer value)
- throws IOException {
-
- assert (key != null && value != null);
-
- if (fileCache.isActive()) {
- fileCache.write(key, value);
- return;
- }
-
- if (memCache.reserveSpace(key, value)) {
- memCache.write(key, value);
- } else {
- fileCache.activate();
- fileCache.write(key, value);
- }
- }
-
- public void mark() throws IOException {
-
- // We read one KV pair in advance in hasNext.
- // If hasNext has read the next KV pair from a new segment, but the
- // user has not called next() for that KV, then reset the readSegmentIndex
- // to the previous segment
-
- if (nextKVOffset == 0) {
- assert (readSegmentIndex != 0);
- assert (currentKVOffset != 0);
- readSegmentIndex --;
- }
-
- // just drop segments before the current active segment
-
- int i = 0;
- Iterator<Segment<K,V>> itr = segmentList.iterator();
- while (itr.hasNext()) {
- Segment<K,V> s = itr.next();
- if (i == readSegmentIndex) {
- break;
- }
- s.close();
- itr.remove();
- i++;
- LOG.debug("Dropping a segment");
- }
-
- // FirstSegmentOffset is the offset in the current segment from where we
- // need to start reading on the next reset
-
- firstSegmentOffset = currentKVOffset;
- readSegmentIndex = 0;
-
- LOG.debug("Setting the FirsSegmentOffset to " + currentKVOffset);
- }
-
- public void reset() throws IOException {
-
- // Create a new segment for the previously written records only if we
- // are not already in the reset mode
-
- if (!inReset) {
- if (fileCache.isActive) {
- fileCache.createInDiskSegment();
- } else {
- memCache.createInMemorySegment();
- }
- }
-
- inReset = true;
-
- // Reset the segments to the correct position from where the next read
- // should begin.
- for (int i = 0; i < segmentList.size(); i++) {
- Segment<K,V> s = segmentList.get(i);
- if (s.inMemory()) {
- int offset = (i == 0) ? firstSegmentOffset : 0;
- s.getReader().reset(offset);
- } else {
- s.closeReader();
- if (i == 0) {
- s.reinitReader(firstSegmentOffset);
- s.getReader().disableChecksumValidation();
- }
- }
- }
-
- currentKVOffset = firstSegmentOffset;
- nextKVOffset = -1;
- readSegmentIndex = 0;
- hasMore = false;
- lastSegmentEOF = false;
-
- LOG.debug("Reset - First segment offset is " + firstSegmentOffset +
- " Segment List Size is " + segmentList.size());
- }
-
- public boolean hasNext() throws IOException {
-
- if (lastSegmentEOF) {
- return false;
- }
-
- // We read the next KV from the cache to decide if there is any left.
- // Since hasNext can be called several times before the actual call to
- // next(), we use hasMore to avoid extra reads. hasMore is set to false
- // when the user actually consumes this record in next()
-
- if (hasMore) {
- return true;
- }
-
- Segment<K,V> seg = segmentList.get(readSegmentIndex);
- // Mark the current position. This would be set to currentKVOffset
- // when the user consumes this record in next().
- nextKVOffset = (int) seg.getActualPosition();
- if (seg.nextRawKey()) {
- currentKey = seg.getKey();
- seg.getValue(currentValue);
- hasMore = true;
- return true;
- } else {
- if (!seg.inMemory()) {
- seg.closeReader();
- }
- }
-
- // If this is the last segment, mark the lastSegmentEOF flag and return
- if (readSegmentIndex == segmentList.size() - 1) {
- nextKVOffset = -1;
- lastSegmentEOF = true;
- return false;
- }
-
- nextKVOffset = 0;
- readSegmentIndex ++;
-
- Segment<K,V> nextSegment = segmentList.get(readSegmentIndex);
-
- // We possibly are moving from a memory segment to a disk segment.
- // Reset so that we do not corrupt the in-memory segment buffer.
- // See HADOOP-5494
-
- if (!nextSegment.inMemory()) {
- currentValue.reset(currentDiskValue.getData(),
- currentDiskValue.getLength());
- nextSegment.init(null);
- }
-
- if (nextSegment.nextRawKey()) {
- currentKey = nextSegment.getKey();
- nextSegment.getValue(currentValue);
- hasMore = true;
- return true;
- } else {
- throw new IOException("New segment did not have even one K/V");
- }
- }
-
- public void next() throws IOException {
- if (!hasNext()) {
- throw new NoSuchElementException("iterate past last value");
- }
- // Reset hasMore. See comment in hasNext()
- hasMore = false;
- currentKVOffset = nextKVOffset;
- nextKVOffset = -1;
- }
-
- public DataInputBuffer nextValue() {
- return currentValue;
- }
-
- public DataInputBuffer nextKey() {
- return currentKey;
- }
-
- public void reinitialize() throws IOException {
- if (segmentList.size() != 0) {
- clearSegmentList();
- }
- memCache.reinitialize(true);
- fileCache.reinitialize();
- readSegmentIndex = firstSegmentOffset = 0;
- currentKVOffset = 0;
- nextKVOffset = -1;
- hasMore = inReset = clearMarkFlag = false;
- }
-
- /**
- * This function is called by the ValuesIterator when a mark is called
- * outside of a reset zone.
- */
- public void exitResetMode() throws IOException {
- inReset = false;
- if (clearMarkFlag ) {
- // If a flag was set to clear mark, do the reinit now.
- // See clearMark()
- reinitialize();
- return;
- }
- if (!fileCache.isActive) {
- memCache.reinitialize(false);
- }
- }
-
- /** Return the current underlying output stream so that the value
- * iterators can write the first key and value bytes to it directly
- * @param length The length of the impending write
- */
- public DataOutputStream getOutputStream(int length) throws IOException {
- if (memCache.reserveSpace(length)) {
- return memCache.dataOut;
- } else {
- fileCache.activate();
- return fileCache.writer.getOutputStream();
- }
- }
-
- /** This method is called by the valueIterators after writing the first
- * key and value bytes to the BackupStore
- * @param length
- */
- public void updateCounters(int length) {
- if (fileCache.isActive) {
- fileCache.writer.updateCountersForExternalAppend(length);
- } else {
- memCache.usedSize += length;
- }
- }
-
- public void clearMark() throws IOException {
- if (inReset) {
- // If we are in the reset mode, we just mark a flag and come out
- // The actual re-initialization is done when we exit the reset
- // mode
- clearMarkFlag = true;
- } else {
- reinitialize();
- }
- }
-
- private void clearSegmentList() throws IOException {
- for (Segment<K,V> segment: segmentList) {
- long len = segment.getLength();
- segment.close();
- if (segment.inMemory()) {
- memCache.unreserve(len);
- }
- }
- segmentList.clear();
- }
-
- class MemoryCache {
- private DataOutputBuffer dataOut;
- private int blockSize;
- private int usedSize;
- private final BackupRamManager ramManager;
-
- // Memory cache is made up of blocks.
- private int defaultBlockSize = 1024 * 1024;
-
- public MemoryCache(int maxSize) {
- ramManager = new BackupRamManager(maxSize);
- if (maxSize < defaultBlockSize) {
- defaultBlockSize = maxSize;
- }
- }
-
- public void unreserve(long len) {
- ramManager.unreserve((int)len);
- }
-
- /**
- * Re-initialize the memory cache.
- *
- * @param clearAll If true, re-initialize the ramManager also.
- */
- void reinitialize(boolean clearAll) {
- if (clearAll) {
- ramManager.reinitialize();
- }
- int allocatedSize = createNewMemoryBlock(defaultBlockSize,
- defaultBlockSize);
- assert(allocatedSize == defaultBlockSize || allocatedSize == 0);
- LOG.debug("Created a new mem block of " + allocatedSize);
- }
-
- private int createNewMemoryBlock(int requestedSize, int minSize) {
- int allocatedSize = ramManager.reserve(requestedSize, minSize);
- usedSize = 0;
- if (allocatedSize == 0) {
- dataOut = null;
- blockSize = 0;
- } else {
- dataOut = new DataOutputBuffer(allocatedSize);
- blockSize = allocatedSize;
- }
- return allocatedSize;
- }
-
- /**
- * This method determines if there is enough space left in the
- * memory cache to write the requested length plus space for
- * subsequent EOF markers.
- * @param length
- * @return true if enough space is available
- */
- boolean reserveSpace(int length) throws IOException {
- int availableSize = blockSize - usedSize;
- if (availableSize >= length + EOF_MARKER_SIZE) {
- return true;
- }
- // Not enough available. Close this block
- assert (!inReset);
-
- createInMemorySegment();
-
- // Create a new block
- int tmp = Math.max(length + EOF_MARKER_SIZE, defaultBlockSize);
- availableSize = createNewMemoryBlock(tmp,
- (length + EOF_MARKER_SIZE));
-
- return availableSize != 0;
- }
-
- boolean reserveSpace(DataInputBuffer key, DataInputBuffer value)
- throws IOException {
- int keyLength = key.getLength() - key.getPosition();
- int valueLength = value.getLength() - value.getPosition();
-
- int requestedSize = keyLength + valueLength +
- WritableUtils.getVIntSize(keyLength) +
- WritableUtils.getVIntSize(valueLength);
- return reserveSpace(requestedSize);
- }
-
- /**
- * Write the key and value to the cache in the IFile format
- * @param key
- * @param value
- * @throws IOException
- */
- public void write(DataInputBuffer key, DataInputBuffer value)
- throws IOException {
- int keyLength = key.getLength() - key.getPosition();
- int valueLength = value.getLength() - value.getPosition();
- WritableUtils.writeVInt(dataOut, keyLength);
- WritableUtils.writeVInt(dataOut, valueLength);
- dataOut.write(key.getData(), key.getPosition(), keyLength);
- dataOut.write(value.getData(), value.getPosition(), valueLength);
- usedSize += keyLength + valueLength +
- WritableUtils.getVIntSize(keyLength) +
- WritableUtils.getVIntSize(valueLength);
- LOG.debug("ID: " + segmentList.size() + " WRITE TO MEM");
- }
-
- /**
- * This method creates a memory segment from the existing buffer
- * @throws IOException
- */
- void createInMemorySegment () throws IOException {
-
- // If nothing was written in this block because the record size
- // was greater than the allocated block size, just return.
- if (usedSize == 0) {
- ramManager.unreserve(blockSize);
- return;
- }
-
- // reserveSpace() would have ensured that there is enough space
- // left for the EOF markers.
- assert ((blockSize - usedSize) >= EOF_MARKER_SIZE);
-
- WritableUtils.writeVInt(dataOut, IFile.EOF_MARKER);
- WritableUtils.writeVInt(dataOut, IFile.EOF_MARKER);
-
- usedSize += EOF_MARKER_SIZE;
-
- ramManager.unreserve(blockSize - usedSize);
-
- Reader<K, V> reader =
- new org.apache.hadoop.mapreduce.task.reduce.InMemoryReader<K, V>(null,
- (org.apache.hadoop.mapred.TaskAttemptID) tid,
- dataOut.getData(), 0, usedSize);
- Segment<K, V> segment = new Segment<K, V>(reader, false);
- segmentList.add(segment);
- LOG.debug("Added Memory Segment to List. List Size is " +
- segmentList.size());
- }
- }
-
- class FileCache {
- private LocalDirAllocator lDirAlloc;
- private final Configuration conf;
- private final FileSystem fs;
- private boolean isActive = false;
-
- private Path file = null;
- private IFile.Writer<K,V> writer = null;
- private int spillNumber = 0;
-
- public FileCache(Configuration conf)
- throws IOException {
- this.conf = conf;
- this.fs = FileSystem.getLocal(conf);
- this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
- }
-
- void write(DataInputBuffer key, DataInputBuffer value)
- throws IOException {
- if (writer == null) {
- // If spillNumber is 0, we should have called activate and not
- // come here at all
- assert (spillNumber != 0);
- writer = createSpillFile();
- }
- writer.append(key, value);
- LOG.debug("ID: " + segmentList.size() + " WRITE TO DISK");
- }
-
- void reinitialize() {
- spillNumber = 0;
- writer = null;
- isActive = false;
- }
-
- void activate() throws IOException {
- isActive = true;
- writer = createSpillFile();
- }
-
- void createInDiskSegment() throws IOException {
- assert (writer != null);
- writer.close();
- Segment<K,V> s = new Segment<K, V>(conf, fs, file, null, true);
- writer = null;
- segmentList.add(s);
- LOG.debug("Disk Segment added to List. Size is " + segmentList.size());
- }
-
- boolean isActive() { return isActive; }
-
- private Writer<K,V> createSpillFile() throws IOException {
- Path tmp =
- new Path(TaskTracker.OUTPUT + "/backup_" + tid.getId() + "_"
- + (spillNumber++) + ".out");
-
- LOG.info("Created file: " + tmp);
-
- file = lDirAlloc.getLocalPathForWrite(tmp.toUri().getPath(),
- -1, conf);
- return new Writer<K, V>(conf, fs, file);
- }
- }
-
- static class BackupRamManager implements RamManager {
-
- private int availableSize = 0;
- private final int maxSize;
-
- public BackupRamManager(int size) {
- availableSize = maxSize = size;
- }
-
- public boolean reserve(int requestedSize, InputStream in) {
- // Not used
- LOG.warn("Reserve(int, InputStream) not supported by BackupRamManager");
- return false;
- }
-
- int reserve(int requestedSize) {
- if (availableSize == 0) {
- return 0;
- }
- int reservedSize = Math.min(requestedSize, availableSize);
- availableSize -= reservedSize;
- LOG.debug("Reserving: " + reservedSize + " Requested: " + requestedSize);
- return reservedSize;
- }
-
- int reserve(int requestedSize, int minSize) {
- if (availableSize < minSize) {
- LOG.debug("No Space available. Available: " + availableSize +
- " MinSize: " + minSize);
- return 0;
- } else {
- return reserve(requestedSize);
- }
- }
-
- public void unreserve(int requestedSize) {
- availableSize += requestedSize;
- LOG.debug("Unreserving: " + requestedSize +
- ". Available: " + availableSize);
- }
-
- void reinitialize() {
- availableSize = maxSize;
- }
- }
-}
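For reference, a minimal sketch of how the mark/reset API of the BackupStore
deleted above is driven. Illustrative only: taskAttemptId, keyBuf and valBuf
are hypothetical stand-ins, and the org.apache.hadoop.{conf,io,mapred,mapreduce}
imports are omitted.

    // Sketch only -- exercises the mark/reset API of the class deleted above.
    void backupStoreSketch(Configuration conf, TaskAttemptID taskAttemptId,
                           DataInputBuffer keyBuf, DataInputBuffer valBuf)
        throws IOException {
      conf.setFloat(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT, 0.2f);
      BackupStore<Text, IntWritable> store =
          new BackupStore<Text, IntWritable>(conf, taskAttemptId);
      store.write(keyBuf, valBuf);  // cached in memory, spills to disk when full
      store.mark();                 // remember the current read position
      while (store.hasNext()) {
        store.next();               // advance; nextKey()/nextValue() now valid
        DataInputBuffer value = store.nextValue();
      }
      store.reset();                // rewind to the mark; values replay from the caches
    }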
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/JobACLsManager.java b/mapreduce/src/java/org/apache/hadoop/mapred/JobACLsManager.java
deleted file mode 100644
index 7a9a5f5..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/JobACLsManager.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.JobACL;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
-
-@InterfaceAudience.Private
-public class JobACLsManager {
-
- Configuration conf;
-
- public JobACLsManager(Configuration conf) {
- this.conf = conf;
- }
-
- public boolean areACLsEnabled() {
- return conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
- }
-
- /**
- * Construct the jobACLs from the configuration so that they can be kept in
- * memory. If authorization is disabled on the JT, nothing is constructed
- * and an empty map is returned.
- *
- * @return JobACL to AccessControlList map.
- */
- Map<JobACL, AccessControlList> constructJobACLs(Configuration conf) {
-
- Map<JobACL, AccessControlList> acls =
- new HashMap<JobACL, AccessControlList>();
-
- // Don't construct anything if authorization is disabled.
- if (!areACLsEnabled()) {
- return acls;
- }
-
- for (JobACL aclName : JobACL.values()) {
- String aclConfigName = aclName.getAclName();
- String aclConfigured = conf.get(aclConfigName);
- if (aclConfigured == null) {
- // If ACLs are not configured at all, we grant no access to anyone, so
- // only the job owner and cluster administrators can perform operations.
- aclConfigured = " ";
- }
- acls.put(aclName, new AccessControlList(aclConfigured));
- }
- return acls;
- }
-
- /**
- * If authorization is enabled, checks whether the user (in the callerUGI)
- * is authorized to perform the operation specified by 'jobOperation' on
- * the job by checking if the user is the jobOwner or part of the job ACL for the
- * specific job operation.
- * <ul>
- * <li>The owner of the job can do any operation on the job</li>
- * <li>For all other users/groups job-acls are checked</li>
- * </ul>
- * @param callerUGI
- * @param jobOperation
- * @param jobOwner
- * @param jobACL
- * @return true if the operation is authorized, false otherwise
- */
- public boolean checkAccess(UserGroupInformation callerUGI,
- JobACL jobOperation, String jobOwner, AccessControlList jobACL) {
-
- String user = callerUGI.getShortUserName();
- if (!areACLsEnabled()) {
- return true;
- }
-
- // Allow the job owner any operation on the job; otherwise consult the ACL
- if (user.equals(jobOwner)
- || jobACL.isUserAllowed(callerUGI)) {
- return true;
- }
-
- return false;
- }
-}
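For reference, a sketch of how the JobACLsManager deleted above is exercised.
Illustrative only: the user names and ACL string are made up, and since
constructJobACLs is package-private, a same-package caller is assumed.

    // Sketch only -- not part of the original source.
    Configuration conf = new Configuration();
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
    conf.set(JobACL.VIEW_JOB.getAclName(), "alice admins"); // user alice, group admins
    JobACLsManager aclsManager = new JobACLsManager(conf);
    Map<JobACL, AccessControlList> acls = aclsManager.constructJobACLs(conf);

    UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
    // true: alice is in the view-job ACL even though "bob" owns the job
    boolean canView = aclsManager.checkAccess(alice, JobACL.VIEW_JOB, "bob",
        acls.get(JobACL.VIEW_JOB));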
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MROutputFiles.java b/mapreduce/src/java/org/apache/hadoop/mapred/MROutputFiles.java
deleted file mode 100644
index 3f64a10..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/MROutputFiles.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.fs.LocalDirAllocator;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.MRConfig;
-
-/**
- * Manipulate the working area for the transient store for maps and reduces.
- *
- * This class is used by map and reduce tasks to identify the directories that
- * they need to write to/read from for intermediate files. The callers of
- * these methods are from the Child running the Task.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class MROutputFiles extends MapOutputFile {
-
- private LocalDirAllocator lDirAlloc =
- new LocalDirAllocator(MRConfig.LOCAL_DIR);
-
- public MROutputFiles() {
- }
-
- /**
- * Return the path to the local map output file created earlier.
- *
- * @return path
- * @throws IOException
- */
- @Override
- public Path getOutputFile()
- throws IOException {
- return lDirAlloc.getLocalPathToRead(TaskTracker.OUTPUT + Path.SEPARATOR
- + MAP_OUTPUT_FILENAME_STRING, getConf());
- }
-
- /**
- * Create a local map output file name.
- *
- * @param size the size of the file
- * @return path
- * @throws IOException
- */
- @Override
- public Path getOutputFileForWrite(long size)
- throws IOException {
- return lDirAlloc.getLocalPathForWrite(TaskTracker.OUTPUT + Path.SEPARATOR
- + MAP_OUTPUT_FILENAME_STRING, size, getConf());
- }
-
- /**
- * Create a local map output file name on the same volume.
- */
- @Override
- public Path getOutputFileForWriteInVolume(Path existing) {
- return new Path(existing.getParent(), MAP_OUTPUT_FILENAME_STRING);
- }
-
- /**
- * Return the path to a local map output index file created earlier
- *
- * @return path
- * @throws IOException
- */
- @Override
- public Path getOutputIndexFile()
- throws IOException {
- return lDirAlloc.getLocalPathToRead(TaskTracker.OUTPUT + Path.SEPARATOR
- + MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
- getConf());
- }
-
- /**
- * Create a local map output index file name.
- *
- * @param size the size of the file
- * @return path
- * @throws IOException
- */
- @Override
- public Path getOutputIndexFileForWrite(long size)
- throws IOException {
- return lDirAlloc.getLocalPathForWrite(TaskTracker.OUTPUT + Path.SEPARATOR
- + MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
- size, getConf());
- }
-
- /**
- * Create a local map output index file name on the same volume.
- */
- @Override
- public Path getOutputIndexFileForWriteInVolume(Path existing) {
- return new Path(existing.getParent(),
- MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING);
- }
-
- /**
- * Return a local map spill file created earlier.
- *
- * @param spillNumber the number
- * @return path
- * @throws IOException
- */
- @Override
- public Path getSpillFile(int spillNumber)
- throws IOException {
- return lDirAlloc.getLocalPathToRead(TaskTracker.OUTPUT + "/spill"
- + spillNumber + ".out", getConf());
- }
-
- /**
- * Create a local map spill file name.
- *
- * @param spillNumber the number
- * @param size the size of the file
- * @return path
- * @throws IOException
- */
- @Override
- public Path getSpillFileForWrite(int spillNumber, long size)
- throws IOException {
- return lDirAlloc.getLocalPathForWrite(TaskTracker.OUTPUT + "/spill"
- + spillNumber + ".out", size, getConf());
- }
-
- /**
- * Return a local map spill index file created earlier
- *
- * @param spillNumber the number
- * @return path
- * @throws IOException
- */
- @Override
- public Path getSpillIndexFile(int spillNumber)
- throws IOException {
- return lDirAlloc.getLocalPathToRead(TaskTracker.OUTPUT + "/spill"
- + spillNumber + ".out.index", getConf());
- }
-
- /**
- * Create a local map spill index file name.
- *
- * @param spillNumber the number
- * @param size the size of the file
- * @return path
- * @throws IOException
- */
- @Override
- public Path getSpillIndexFileForWrite(int spillNumber, long size)
- throws IOException {
- return lDirAlloc.getLocalPathForWrite(TaskTracker.OUTPUT + "/spill"
- + spillNumber + ".out.index", size, getConf());
- }
-
- /**
- * Return a local reduce input file created earlier
- *
- * @param mapId a map task id
- * @return path
- * @throws IOException
- */
- @Override
- public Path getInputFile(int mapId)
- throws IOException {
- return lDirAlloc.getLocalPathToRead(String.format(
- REDUCE_INPUT_FILE_FORMAT_STRING, TaskTracker.OUTPUT, Integer
- .valueOf(mapId)), getConf());
- }
-
- /**
- * Create a local reduce input file name.
- *
- * @param mapId a map task id
- * @param size the size of the file
- * @return path
- * @throws IOException
- */
- @Override
- public Path getInputFileForWrite(org.apache.hadoop.mapreduce.TaskID mapId,
- long size)
- throws IOException {
- return lDirAlloc.getLocalPathForWrite(String.format(
- REDUCE_INPUT_FILE_FORMAT_STRING, TaskTracker.OUTPUT, mapId.getId()),
- size, getConf());
- }
-
- /** Removes all of the files related to a task. */
- @Override
- public void removeAll()
- throws IOException {
- ((JobConf)getConf()).deleteLocalFiles(TaskTracker.OUTPUT);
- }
-
- @Override
- public void setConf(Configuration conf) {
- if (!(conf instanceof JobConf)) {
- conf = new JobConf(conf);
- }
- super.setConf(conf);
- }
-
-}
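For reference, a sketch of the task-local layout the MROutputFiles methods
above resolve to, assuming MapOutputFile's conventional names ("file.out",
".index", spillN.out) and a hypothetical local directory; IOException handling
is elided.

    // Sketch only -- not part of the original source.
    JobConf job = new JobConf();
    job.set(MRConfig.LOCAL_DIR, "/tmp/mapred/local");   // hypothetical
    MROutputFiles outputFiles = new MROutputFiles();
    outputFiles.setConf(job);
    Path out   = outputFiles.getOutputFileForWrite(1 << 20);   // .../output/file.out
    Path index = outputFiles.getOutputIndexFileForWrite(1024); // .../output/file.out.index
    Path spill = outputFiles.getSpillFileForWrite(0, 1 << 20); // .../output/spill0.out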
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java b/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java
deleted file mode 100644
index d1bff52..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java
+++ /dev/null
@@ -1,1869 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.nio.IntBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileSystem.Statistics;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.RawComparator;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.hadoop.io.serializer.Deserializer;
-import org.apache.hadoop.io.serializer.SerializationFactory;
-import org.apache.hadoop.io.serializer.Serializer;
-import org.apache.hadoop.mapred.IFile.Writer;
-import org.apache.hadoop.mapred.Merger.Segment;
-import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.TaskCounter;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter;
-import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
-import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex;
-import org.apache.hadoop.mapreduce.task.MapContextImpl;
-import org.apache.hadoop.util.IndexedSortable;
-import org.apache.hadoop.util.IndexedSorter;
-import org.apache.hadoop.util.Progress;
-import org.apache.hadoop.util.QuickSort;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.StringUtils;
-
-/** A Map task. */
-class MapTask extends Task {
- /**
- * The size of each record in the index file for the map-outputs.
- */
- public static final int MAP_OUTPUT_INDEX_RECORD_LENGTH = 24;
-
- private TaskSplitIndex splitMetaInfo = new TaskSplitIndex();
- private final static int APPROX_HEADER_LENGTH = 150;
-
- private static final Log LOG = LogFactory.getLog(MapTask.class.getName());
-
- private Progress mapPhase;
- private Progress sortPhase;
-
- { // set phase for this task
- setPhase(TaskStatus.Phase.MAP);
- getProgress().setStatus("map");
- }
-
- public MapTask() {
- super();
- }
-
- public MapTask(String jobFile, TaskAttemptID taskId,
- int partition, TaskSplitIndex splitIndex,
- int numSlotsRequired) {
- super(jobFile, taskId, partition, numSlotsRequired);
- this.splitMetaInfo = splitIndex;
- }
-
- @Override
- public boolean isMapTask() {
- return true;
- }
-
- @Override
- public void localizeConfiguration(JobConf conf)
- throws IOException {
- super.localizeConfiguration(conf);
- }
-
-
- @Override
- public TaskRunner createRunner(TaskTracker tracker,
- TaskTracker.TaskInProgress tip) {
- return new MapTaskRunner(tip, tracker, this.conf);
- }
-
- @Override
- public void write(DataOutput out) throws IOException {
- super.write(out);
- if (isMapOrReduce()) {
- splitMetaInfo.write(out);
- splitMetaInfo = null;
- }
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- super.readFields(in);
- if (isMapOrReduce()) {
- splitMetaInfo.readFields(in);
- }
- }
-
- /**
- * This class wraps the user's record reader to update the counters and progress
- * as records are read.
- * @param <K>
- * @param <V>
- */
- class TrackedRecordReader<K, V>
- implements RecordReader<K,V> {
- private RecordReader<K,V> rawIn;
- private Counters.Counter fileInputByteCounter;
- private Counters.Counter inputRecordCounter;
- private TaskReporter reporter;
- private long bytesInPrev = -1;
- private long bytesInCurr = -1;
- private final Statistics fsStats;
-
- TrackedRecordReader(TaskReporter reporter, JobConf job)
- throws IOException{
- inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
- fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
- this.reporter = reporter;
-
- Statistics matchedStats = null;
- if (this.reporter.getInputSplit() instanceof FileSplit) {
- matchedStats = getFsStatistics(((FileSplit) this.reporter
- .getInputSplit()).getPath(), job);
- }
- fsStats = matchedStats;
-
- bytesInPrev = getInputBytes(fsStats);
- rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
- job, reporter);
- bytesInCurr = getInputBytes(fsStats);
- fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
- }
-
- public K createKey() {
- return rawIn.createKey();
- }
-
- public V createValue() {
- return rawIn.createValue();
- }
-
- public synchronized boolean next(K key, V value)
- throws IOException {
- boolean ret = moveToNext(key, value);
- if (ret) {
- incrCounters();
- }
- return ret;
- }
-
- protected void incrCounters() {
- inputRecordCounter.increment(1);
- }
-
- protected synchronized boolean moveToNext(K key, V value)
- throws IOException {
- bytesInPrev = getInputBytes(fsStats);
- boolean ret = rawIn.next(key, value);
- bytesInCurr = getInputBytes(fsStats);
- fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
- reporter.setProgress(getProgress());
- return ret;
- }
-
- public long getPos() throws IOException { return rawIn.getPos(); }
-
- public void close() throws IOException {
- bytesInPrev = getInputBytes(fsStats);
- rawIn.close();
- bytesInCurr = getInputBytes(fsStats);
- fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
- }
-
- public float getProgress() throws IOException {
- return rawIn.getProgress();
- }
- TaskReporter getTaskReporter() {
- return reporter;
- }
-
- private long getInputBytes(Statistics stats) {
- return stats == null ? 0 : stats.getBytesRead();
- }
- }
-
- /**
- * This class skips records that fall within the failed ranges from previous
- * attempts.
- */
- class SkippingRecordReader<K, V> extends TrackedRecordReader<K,V> {
- private SkipRangeIterator skipIt;
- private SequenceFile.Writer skipWriter;
- private boolean toWriteSkipRecs;
- private TaskUmbilicalProtocol umbilical;
- private Counters.Counter skipRecCounter;
- private long recIndex = -1;
-
- SkippingRecordReader(TaskUmbilicalProtocol umbilical,
- TaskReporter reporter, JobConf job) throws IOException{
- super(reporter, job);
- this.umbilical = umbilical;
- this.skipRecCounter = reporter.getCounter(TaskCounter.MAP_SKIPPED_RECORDS);
- this.toWriteSkipRecs = toWriteSkipRecs() &&
- SkipBadRecords.getSkipOutputPath(conf)!=null;
- skipIt = getSkipRanges().skipRangeIterator();
- }
-
- public synchronized boolean next(K key, V value)
- throws IOException {
- if(!skipIt.hasNext()) {
- LOG.warn("Further records got skipped.");
- return false;
- }
- boolean ret = moveToNext(key, value);
- long nextRecIndex = skipIt.next();
- long skip = 0;
- while(recIndex<nextRecIndex && ret) {
- if(toWriteSkipRecs) {
- writeSkippedRec(key, value);
- }
- ret = moveToNext(key, value);
- skip++;
- }
- //close the skip writer once all the ranges are skipped
- if(skip>0 && skipIt.skippedAllRanges() && skipWriter!=null) {
- skipWriter.close();
- }
- skipRecCounter.increment(skip);
- reportNextRecordRange(umbilical, recIndex);
- if (ret) {
- incrCounters();
- }
- return ret;
- }
-
- protected synchronized boolean moveToNext(K key, V value)
- throws IOException {
- recIndex++;
- return super.moveToNext(key, value);
- }
-
- @SuppressWarnings("unchecked")
- private void writeSkippedRec(K key, V value) throws IOException{
- if(skipWriter==null) {
- Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
- Path skipFile = new Path(skipDir, getTaskID().toString());
- skipWriter =
- SequenceFile.createWriter(
- skipFile.getFileSystem(conf), conf, skipFile,
- (Class<K>) createKey().getClass(),
- (Class<V>) createValue().getClass(),
- CompressionType.BLOCK, getTaskReporter());
- }
- skipWriter.append(key, value);
- }
- }
-
- @Override
- public void run(final JobConf job, final TaskUmbilicalProtocol umbilical)
- throws IOException, ClassNotFoundException, InterruptedException {
- this.umbilical = umbilical;
-
- if (isMapTask()) {
- // If there are no reducers then there won't be any sort. Hence the map
- // phase will govern the entire attempt's progress.
- if (conf.getNumReduceTasks() == 0) {
- mapPhase = getProgress().addPhase("map", 1.0f);
- } else {
- // If there are reducers then the entire attempt's progress will be
- // split between the map phase (67%) and the sort phase (33%).
- mapPhase = getProgress().addPhase("map", 0.667f);
- sortPhase = getProgress().addPhase("sort", 0.333f);
- }
- }
- TaskReporter reporter = startReporter(umbilical);
-
- boolean useNewApi = job.getUseNewMapper();
- initialize(job, getJobID(), reporter, useNewApi);
-
- // check if it is a cleanupJobTask
- if (jobCleanup) {
- runJobCleanupTask(umbilical, reporter);
- return;
- }
- if (jobSetup) {
- runJobSetupTask(umbilical, reporter);
- return;
- }
- if (taskCleanup) {
- runTaskCleanupTask(umbilical, reporter);
- return;
- }
-
- if (useNewApi) {
- runNewMapper(job, splitMetaInfo, umbilical, reporter);
- } else {
- runOldMapper(job, splitMetaInfo, umbilical, reporter);
- }
- done(umbilical, reporter);
- }
-
- @SuppressWarnings("unchecked")
- private <T> T getSplitDetails(Path file, long offset)
- throws IOException {
- FileSystem fs = file.getFileSystem(conf);
- FSDataInputStream inFile = fs.open(file);
- inFile.seek(offset);
- String className = Text.readString(inFile);
- Class<T> cls;
- try {
- cls = (Class<T>) conf.getClassByName(className);
- } catch (ClassNotFoundException ce) {
- IOException wrap = new IOException("Split class " + className +
- " not found");
- wrap.initCause(ce);
- throw wrap;
- }
- SerializationFactory factory = new SerializationFactory(conf);
- Deserializer<T> deserializer =
- (Deserializer<T>) factory.getDeserializer(cls);
- deserializer.open(inFile);
- T split = deserializer.deserialize(null);
- long pos = inFile.getPos();
- getCounters().findCounter(
- TaskCounter.SPLIT_RAW_BYTES).increment(pos - offset);
- inFile.close();
- return split;
- }
-
- @SuppressWarnings("unchecked")
- private <INKEY,INVALUE,OUTKEY,OUTVALUE>
- void runOldMapper(final JobConf job,
- final TaskSplitIndex splitIndex,
- final TaskUmbilicalProtocol umbilical,
- TaskReporter reporter
- ) throws IOException, InterruptedException,
- ClassNotFoundException {
- InputSplit inputSplit = getSplitDetails(new Path(splitIndex.getSplitLocation()),
- splitIndex.getStartOffset());
-
- updateJobWithSplit(job, inputSplit);
- reporter.setInputSplit(inputSplit);
-
- RecordReader<INKEY,INVALUE> in = isSkipping() ?
- new SkippingRecordReader<INKEY,INVALUE>(umbilical, reporter, job) :
- new TrackedRecordReader<INKEY,INVALUE>(reporter, job);
- job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
-
-
- int numReduceTasks = conf.getNumReduceTasks();
- LOG.info("numReduceTasks: " + numReduceTasks);
- MapOutputCollector collector = null;
- if (numReduceTasks > 0) {
- collector = new MapOutputBuffer(umbilical, job, reporter);
- } else {
- collector = new DirectMapOutputCollector(umbilical, job, reporter);
- }
- MapRunnable<INKEY,INVALUE,OUTKEY,OUTVALUE> runner =
- ReflectionUtils.newInstance(job.getMapRunnerClass(), job);
-
- try {
- runner.run(in, new OldOutputCollector(collector, conf), reporter);
- mapPhase.complete();
- // start the sort phase only if there are reducers
- if (numReduceTasks > 0) {
- setPhase(TaskStatus.Phase.SORT);
- }
- statusUpdate(umbilical);
- collector.flush();
- } finally {
- //close
- in.close(); // close input
- collector.close();
- }
- }
-
- /**
- * Update the job with details about the file split
- * @param job the job configuration to update
- * @param inputSplit the file split
- */
- private void updateJobWithSplit(final JobConf job, InputSplit inputSplit) {
- if (inputSplit instanceof FileSplit) {
- FileSplit fileSplit = (FileSplit) inputSplit;
- job.set(JobContext.MAP_INPUT_FILE, fileSplit.getPath().toString());
- job.setLong(JobContext.MAP_INPUT_START, fileSplit.getStart());
- job.setLong(JobContext.MAP_INPUT_PATH, fileSplit.getLength());
- }
- }
-
- static class NewTrackingRecordReader<K,V>
- extends org.apache.hadoop.mapreduce.RecordReader<K,V> {
- private final org.apache.hadoop.mapreduce.RecordReader<K,V> real;
- private final org.apache.hadoop.mapreduce.Counter inputRecordCounter;
- private final org.apache.hadoop.mapreduce.Counter fileInputByteCounter;
- private final TaskReporter reporter;
- private final Statistics fsStats;
-
- NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
- org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
- TaskReporter reporter,
- org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
- throws InterruptedException, IOException {
- this.reporter = reporter;
- this.inputRecordCounter = reporter
- .getCounter(TaskCounter.MAP_INPUT_RECORDS);
- this.fileInputByteCounter = reporter
- .getCounter(FileInputFormatCounter.BYTES_READ);
-
- Statistics matchedStats = null;
- if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
- matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
- .getPath(), taskContext.getConfiguration());
- }
- fsStats = matchedStats;
-
- long bytesInPrev = getInputBytes(fsStats);
- this.real = inputFormat.createRecordReader(split, taskContext);
- long bytesInCurr = getInputBytes(fsStats);
- fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
- }
-
- @Override
- public void close() throws IOException {
- long bytesInPrev = getInputBytes(fsStats);
- real.close();
- long bytesInCurr = getInputBytes(fsStats);
- fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
- }
-
- @Override
- public K getCurrentKey() throws IOException, InterruptedException {
- return real.getCurrentKey();
- }
-
- @Override
- public V getCurrentValue() throws IOException, InterruptedException {
- return real.getCurrentValue();
- }
-
- @Override
- public float getProgress() throws IOException, InterruptedException {
- return real.getProgress();
- }
-
- @Override
- public void initialize(org.apache.hadoop.mapreduce.InputSplit split,
- org.apache.hadoop.mapreduce.TaskAttemptContext context
- ) throws IOException, InterruptedException {
- long bytesInPrev = getInputBytes(fsStats);
- real.initialize(split, context);
- long bytesInCurr = getInputBytes(fsStats);
- fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
- }
-
- @Override
- public boolean nextKeyValue() throws IOException, InterruptedException {
- long bytesInPrev = getInputBytes(fsStats);
- boolean result = real.nextKeyValue();
- long bytesInCurr = getInputBytes(fsStats);
- if (result) {
- inputRecordCounter.increment(1);
- }
- fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
- reporter.setProgress(getProgress());
- return result;
- }
-
- private long getInputBytes(Statistics stats) {
- return stats == null ? 0 : stats.getBytesRead();
- }
- }
-
- /**
- * Since the mapred and mapreduce Partitioners don't share a common interface
- * (JobConfigurable, which mapred.Partitioner extends, is deprecated), the
- * partitioner lives in Old/NewOutputCollector. Note that, for map-only jobs,
- * the configured partitioner should not be called. It's common for
- * partitioners to compute a result mod numReduces, which causes a div0 error.
- */
- private static class OldOutputCollector<K,V> implements OutputCollector<K,V> {
- private final Partitioner<K,V> partitioner;
- private final MapOutputCollector<K,V> collector;
- private final int numPartitions;
-
- @SuppressWarnings("unchecked")
- OldOutputCollector(MapOutputCollector<K,V> collector, JobConf conf) {
- numPartitions = conf.getNumReduceTasks();
- if (numPartitions > 1) {
- partitioner = (Partitioner<K,V>)
- ReflectionUtils.newInstance(conf.getPartitionerClass(), conf);
- } else {
- partitioner = new Partitioner<K,V>() {
- @Override
- public void configure(JobConf job) { }
- @Override
- public int getPartition(K key, V value, int numPartitions) {
- return numPartitions - 1;
- }
- };
- }
- this.collector = collector;
- }
-
- @Override
- public void collect(K key, V value) throws IOException {
- try {
- collector.collect(key, value,
- partitioner.getPartition(key, value, numPartitions));
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- throw new IOException("interrupt exception", ie);
- }
- }
- }
-
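// Illustrative note, not part of the original file: the div0 hazard described
// in the comment above. A typical partitioner computes something like
//     return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
// which divides by zero when numPartitions == 0, i.e. in a map-only job.
// OldOutputCollector therefore substitutes the dummy partitioner and never
// invokes the configured one when there is at most one partition.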
- private class NewDirectOutputCollector<K,V>
- extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
- private final org.apache.hadoop.mapreduce.RecordWriter out;
-
- private final TaskReporter reporter;
-
- private final Counters.Counter mapOutputRecordCounter;
- private final Counters.Counter fileOutputByteCounter;
- private final Statistics fsStats;
-
- @SuppressWarnings("unchecked")
- NewDirectOutputCollector(MRJobConfig jobContext,
- JobConf job, TaskUmbilicalProtocol umbilical, TaskReporter reporter)
- throws IOException, ClassNotFoundException, InterruptedException {
- this.reporter = reporter;
- mapOutputRecordCounter = reporter
- .getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
- fileOutputByteCounter = reporter
- .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
-
- Statistics matchedStats = null;
- if (outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
- matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
- .getOutputPath(taskContext), taskContext.getConfiguration());
- }
- fsStats = matchedStats;
-
- long bytesOutPrev = getOutputBytes(fsStats);
- out = outputFormat.getRecordWriter(taskContext);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public void write(K key, V value)
- throws IOException, InterruptedException {
- reporter.progress();
- long bytesOutPrev = getOutputBytes(fsStats);
- out.write(key, value);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- mapOutputRecordCounter.increment(1);
- }
-
- @Override
- public void close(TaskAttemptContext context)
- throws IOException,InterruptedException {
- reporter.progress();
- if (out != null) {
- long bytesOutPrev = getOutputBytes(fsStats);
- out.close(context);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- }
- }
-
- private long getOutputBytes(Statistics stats) {
- return stats == null ? 0 : stats.getBytesWritten();
- }
- }
-
- private class NewOutputCollector<K,V>
- extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
- private final MapOutputCollector<K,V> collector;
- private final org.apache.hadoop.mapreduce.Partitioner<K,V> partitioner;
- private final int partitions;
-
- @SuppressWarnings("unchecked")
- NewOutputCollector(org.apache.hadoop.mapreduce.JobContext jobContext,
- JobConf job,
- TaskUmbilicalProtocol umbilical,
- TaskReporter reporter
- ) throws IOException, ClassNotFoundException {
- collector = new MapOutputBuffer<K,V>(umbilical, job, reporter);
- partitions = jobContext.getNumReduceTasks();
- if (partitions > 1) {
- partitioner = (org.apache.hadoop.mapreduce.Partitioner<K,V>)
- ReflectionUtils.newInstance(jobContext.getPartitionerClass(), job);
- } else {
- partitioner = new org.apache.hadoop.mapreduce.Partitioner<K,V>() {
- @Override
- public int getPartition(K key, V value, int numPartitions) {
- return partitions - 1;
- }
- };
- }
- }
-
- @Override
- public void write(K key, V value) throws IOException, InterruptedException {
- collector.collect(key, value,
- partitioner.getPartition(key, value, partitions));
- }
-
- @Override
- public void close(TaskAttemptContext context
- ) throws IOException,InterruptedException {
- try {
- collector.flush();
- } catch (ClassNotFoundException cnf) {
- throw new IOException("can't find class ", cnf);
- }
- collector.close();
- }
- }
-
- @SuppressWarnings("unchecked")
- private <INKEY,INVALUE,OUTKEY,OUTVALUE>
- void runNewMapper(final JobConf job,
- final TaskSplitIndex splitIndex,
- final TaskUmbilicalProtocol umbilical,
- TaskReporter reporter
- ) throws IOException, ClassNotFoundException,
- InterruptedException {
- // make a task context so we can get the classes
- org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
- new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job,
- getTaskID(),
- reporter);
- // make a mapper
- org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE> mapper =
- (org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>)
- ReflectionUtils.newInstance(taskContext.getMapperClass(), job);
- // make the input format
- org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE> inputFormat =
- (org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE>)
- ReflectionUtils.newInstance(taskContext.getInputFormatClass(), job);
- // rebuild the input split
- org.apache.hadoop.mapreduce.InputSplit split = null;
- split = getSplitDetails(new Path(splitIndex.getSplitLocation()),
- splitIndex.getStartOffset());
-
- org.apache.hadoop.mapreduce.RecordReader<INKEY,INVALUE> input =
- new NewTrackingRecordReader<INKEY,INVALUE>
- (split, inputFormat, reporter, taskContext);
-
- job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
- org.apache.hadoop.mapreduce.RecordWriter output = null;
-
- // get an output object
- if (job.getNumReduceTasks() == 0) {
- output =
- new NewDirectOutputCollector(taskContext, job, umbilical, reporter);
- } else {
- output = new NewOutputCollector(taskContext, job, umbilical, reporter);
- }
-
- org.apache.hadoop.mapreduce.MapContext<INKEY, INVALUE, OUTKEY, OUTVALUE>
- mapContext =
- new MapContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(job, getTaskID(),
- input, output,
- committer,
- reporter, split);
-
- org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
- mapperContext =
- new WrappedMapper<INKEY, INVALUE, OUTKEY, OUTVALUE>().getMapContext(
- mapContext);
-
- input.initialize(split, mapperContext);
- mapper.run(mapperContext);
- mapPhase.complete();
- setPhase(TaskStatus.Phase.SORT);
- statusUpdate(umbilical);
- input.close();
- output.close(mapperContext);
- }
-
- interface MapOutputCollector<K, V> {
-
- public void collect(K key, V value, int partition
- ) throws IOException, InterruptedException;
- public void close() throws IOException, InterruptedException;
-
- public void flush() throws IOException, InterruptedException,
- ClassNotFoundException;
-
- }
-
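// Illustrative note, not part of the original file: the lifecycle a
// MapOutputCollector implementation is expected to honor --
//     collector.collect(key, value, partition);  // once per emitted record
//     collector.flush();                         // after the map phase completes
//     collector.close();                         // release buffers and streams
// DirectMapOutputCollector below writes straight to the job's OutputFormat;
// MapOutputBuffer sorts and spills map output for the reduce phase.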
- class DirectMapOutputCollector<K, V>
- implements MapOutputCollector<K, V> {
-
- private RecordWriter<K, V> out = null;
-
- private TaskReporter reporter = null;
-
- private final Counters.Counter mapOutputRecordCounter;
- private final Counters.Counter fileOutputByteCounter;
- private final Statistics fsStats;
-
- @SuppressWarnings("unchecked")
- public DirectMapOutputCollector(TaskUmbilicalProtocol umbilical,
- JobConf job, TaskReporter reporter) throws IOException {
- this.reporter = reporter;
- String finalName = getOutputName(getPartition());
- FileSystem fs = FileSystem.get(job);
-
- OutputFormat<K, V> outputFormat = job.getOutputFormat();
- mapOutputRecordCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
-
- fileOutputByteCounter = reporter
- .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
-
- Statistics matchedStats = null;
- if (outputFormat instanceof FileOutputFormat) {
- matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
- }
- fsStats = matchedStats;
-
- long bytesOutPrev = getOutputBytes(fsStats);
- out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- }
-
- public void close() throws IOException {
- if (this.out != null) {
- long bytesOutPrev = getOutputBytes(fsStats);
- out.close(this.reporter);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- }
-
- }
-
- public void flush() throws IOException, InterruptedException,
- ClassNotFoundException {
- }
-
- public void collect(K key, V value, int partition) throws IOException {
- reporter.progress();
- long bytesOutPrev = getOutputBytes(fsStats);
- out.write(key, value);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- mapOutputRecordCounter.increment(1);
- }
-
- private long getOutputBytes(Statistics stats) {
- return stats == null ? 0 : stats.getBytesWritten();
- }
- }
-
- private class MapOutputBuffer<K extends Object, V extends Object>
- implements MapOutputCollector<K, V>, IndexedSortable {
- final int partitions;
- final JobConf job;
- final TaskReporter reporter;
- final Class<K> keyClass;
- final Class<V> valClass;
- final RawComparator<K> comparator;
- final SerializationFactory serializationFactory;
- final Serializer<K> keySerializer;
- final Serializer<V> valSerializer;
- final CombinerRunner<K,V> combinerRunner;
- final CombineOutputCollector<K, V> combineCollector;
-
- // Compression for map-outputs
- final CompressionCodec codec;
-
- // k/v accounting
- final IntBuffer kvmeta; // metadata overlay on backing store
- int kvstart; // marks origin of spill metadata
- int kvend; // marks end of spill metadata
- int kvindex; // marks end of fully serialized records
-
- int equator; // marks origin of meta/serialization
- int bufstart; // marks beginning of spill
- int bufend; // marks beginning of collectable
- int bufmark; // marks end of record
- int bufindex; // marks end of collected
- int bufvoid; // marks the point where we should stop
- // reading at the end of the buffer
-
- byte[] kvbuffer; // main output buffer
- private final byte[] b0 = new byte[0];
-
- private static final int INDEX = 0; // index offset in acct
- private static final int VALSTART = 1; // val offset in acct
- private static final int KEYSTART = 2; // key offset in acct
- private static final int PARTITION = 3; // partition offset in acct
- private static final int NMETA = 4; // num meta ints
- private static final int METASIZE = NMETA * 4; // size in bytes
-
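// Illustrative note, not part of the original file: each record's accounting
// entry occupies NMETA (4) ints = METASIZE (16) bytes of kvmeta. With the
// default io.sort.mb of 100, kvbuffer holds 100 << 20 = 104,857,600 bytes
// (already a multiple of METASIZE), kvmeta spans 26,214,400 ints, and
// maxRec = kvmeta.capacity() / NMETA = 6,553,600 metadata entries.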
- // spill accounting
- final int maxRec;
- final int softLimit;
- boolean spillInProgress;
- int bufferRemaining;
- volatile Throwable sortSpillException = null;
-
- int numSpills = 0;
- final int minSpillsForCombine;
- final IndexedSorter sorter;
- final ReentrantLock spillLock = new ReentrantLock();
- final Condition spillDone = spillLock.newCondition();
- final Condition spillReady = spillLock.newCondition();
- final BlockingBuffer bb = new BlockingBuffer();
- volatile boolean spillThreadRunning = false;
- final SpillThread spillThread = new SpillThread();
-
- final FileSystem rfs;
-
- // Counters
- final Counters.Counter mapOutputByteCounter;
- final Counters.Counter mapOutputRecordCounter;
- final Counters.Counter fileOutputByteCounter;
-
- final ArrayList<SpillRecord> indexCacheList =
- new ArrayList<SpillRecord>();
- private int totalIndexCacheMemory;
- private int indexCacheMemoryLimit;
- private static final int INDEX_CACHE_MEMORY_LIMIT_DEFAULT = 1024 * 1024;
-
- @SuppressWarnings("unchecked")
- public MapOutputBuffer(TaskUmbilicalProtocol umbilical, JobConf job,
- TaskReporter reporter
- ) throws IOException, ClassNotFoundException {
- this.job = job;
- this.reporter = reporter;
- partitions = job.getNumReduceTasks();
- rfs = ((LocalFileSystem)FileSystem.getLocal(job)).getRaw();
-
- //sanity checks
- final float spillper =
- job.getFloat(JobContext.MAP_SORT_SPILL_PERCENT, (float)0.8);
- final int sortmb = job.getInt(JobContext.IO_SORT_MB, 100);
- indexCacheMemoryLimit = job.getInt(JobContext.INDEX_CACHE_MEMORY_LIMIT,
- INDEX_CACHE_MEMORY_LIMIT_DEFAULT);
- if (spillper > (float)1.0 || spillper <= (float)0.0) {
- throw new IOException("Invalid \"" + JobContext.MAP_SORT_SPILL_PERCENT +
- "\": " + spillper);
- }
- if ((sortmb & 0x7FF) != sortmb) {
- throw new IOException(
- "Invalid \"" + JobContext.IO_SORT_MB + "\": " + sortmb);
- }
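-      // Note: the 0x7FF mask restricts io.sort.mb to 11 bits, i.e. at most
-      // 2047; a value of 2048 or more would overflow the int expression
-      // "sortmb << 20" below.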
- sorter = ReflectionUtils.newInstance(job.getClass("map.sort.class",
- QuickSort.class, IndexedSorter.class), job);
- // buffers and accounting
- int maxMemUsage = sortmb << 20;
- maxMemUsage -= maxMemUsage % METASIZE;
- kvbuffer = new byte[maxMemUsage];
- bufvoid = kvbuffer.length;
- kvmeta = ByteBuffer.wrap(kvbuffer).asIntBuffer();
- setEquator(0);
- bufstart = bufend = bufindex = equator;
- kvstart = kvend = kvindex;
-
- maxRec = kvmeta.capacity() / NMETA;
- softLimit = (int)(kvbuffer.length * spillper);
- bufferRemaining = softLimit;
- if (LOG.isInfoEnabled()) {
- LOG.info(JobContext.IO_SORT_MB + ": " + sortmb);
- LOG.info("soft limit at " + softLimit);
- LOG.info("bufstart = " + bufstart + "; bufvoid = " + bufvoid);
- LOG.info("kvstart = " + kvstart + "; length = " + maxRec);
- }
-
- // k/v serialization
- comparator = job.getOutputKeyComparator();
- keyClass = (Class<K>)job.getMapOutputKeyClass();
- valClass = (Class<V>)job.getMapOutputValueClass();
- serializationFactory = new SerializationFactory(job);
- keySerializer = serializationFactory.getSerializer(keyClass);
- keySerializer.open(bb);
- valSerializer = serializationFactory.getSerializer(valClass);
- valSerializer.open(bb);
-
- // output counters
- mapOutputByteCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_BYTES);
- mapOutputRecordCounter =
- reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
- fileOutputByteCounter = reporter
- .getCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES);
-
- // compression
- if (job.getCompressMapOutput()) {
- Class<? extends CompressionCodec> codecClass =
- job.getMapOutputCompressorClass(DefaultCodec.class);
- codec = ReflectionUtils.newInstance(codecClass, job);
- } else {
- codec = null;
- }
-
- // combiner
- final Counters.Counter combineInputCounter =
- reporter.getCounter(TaskCounter.COMBINE_INPUT_RECORDS);
- combinerRunner = CombinerRunner.create(job, getTaskID(),
- combineInputCounter,
- reporter, null);
- if (combinerRunner != null) {
- final Counters.Counter combineOutputCounter =
- reporter.getCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
-        combineCollector = new CombineOutputCollector<K,V>(
-            combineOutputCounter, reporter, conf);
- } else {
- combineCollector = null;
- }
- spillInProgress = false;
- minSpillsForCombine = job.getInt(JobContext.MAP_COMBINE_MIN_SPILLS, 3);
- spillThread.setDaemon(true);
- spillThread.setName("SpillThread");
- spillLock.lock();
- try {
- spillThread.start();
- while (!spillThreadRunning) {
- spillDone.await();
- }
- } catch (InterruptedException e) {
- throw new IOException("Spill thread failed to initialize", e);
- } finally {
- spillLock.unlock();
- }
- if (sortSpillException != null) {
- throw new IOException("Spill thread failed to initialize",
- sortSpillException);
- }
- }
-
- /**
- * Serialize the key, value to intermediate storage.
- * When this method returns, kvindex must refer to sufficient unused
-     * storage to store one metadata block (METASIZE bytes).
- */
- public synchronized void collect(K key, V value, final int partition
- ) throws IOException {
- reporter.progress();
- if (key.getClass() != keyClass) {
- throw new IOException("Type mismatch in key from map: expected "
- + keyClass.getName() + ", received "
- + key.getClass().getName());
- }
- if (value.getClass() != valClass) {
- throw new IOException("Type mismatch in value from map: expected "
- + valClass.getName() + ", received "
- + value.getClass().getName());
- }
- if (partition < 0 || partition >= partitions) {
- throw new IOException("Illegal partition for " + key + " (" +
- partition + ")");
- }
- checkSpillException();
- bufferRemaining -= METASIZE;
- if (bufferRemaining <= 0) {
- // start spill if the thread is not running and the soft limit has been
- // reached
- spillLock.lock();
- try {
- do {
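-          // Note: this do { ... } while (false) block exists so the reclaim
-          // branch below can "continue", which jumps to the always-false
-          // loop test and exits once a finished spill has been reclaimed.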
- if (!spillInProgress) {
- final int kvbidx = 4 * kvindex;
- final int kvbend = 4 * kvend;
- // serialized, unspilled bytes always lie between kvindex and
- // bufindex, crossing the equator. Note that any void space
- // created by a reset must be included in "used" bytes
- final int bUsed = distanceTo(kvbidx, bufindex);
- final boolean bufsoftlimit = bUsed >= softLimit;
- if ((kvbend + METASIZE) % kvbuffer.length !=
- equator - (equator % METASIZE)) {
- // spill finished, reclaim space
- resetSpill();
- bufferRemaining = Math.min(
- distanceTo(bufindex, kvbidx) - 2 * METASIZE,
- softLimit - bUsed) - METASIZE;
- continue;
- } else if (bufsoftlimit && kvindex != kvend) {
-            // spill records, if any have been collected; check the latter,
-            // since metadata alignment alone can push usage past the spill
-            // percentage
- startSpill();
- final int avgRec = (int)
- (mapOutputByteCounter.getCounter() /
- mapOutputRecordCounter.getCounter());
- // leave at least half the split buffer for serialization data
- // ensure that kvindex >= bufindex
- final int distkvi = distanceTo(bufindex, kvbidx);
- final int newPos = (bufindex +
- Math.max(2 * METASIZE - 1,
- Math.min(distkvi / 2,
- distkvi / (METASIZE + avgRec) * METASIZE)))
- % kvbuffer.length;
- setEquator(newPos);
- bufmark = bufindex = newPos;
- final int serBound = 4 * kvend;
-            // the number of bytes remaining before the lock must be held
-            // and the limits rechecked is the minimum of three arcs: the
-            // metadata space, the serialization space, and the soft limit
- bufferRemaining = Math.min(
- // metadata max
- distanceTo(bufend, newPos),
- Math.min(
- // serialization max
- distanceTo(newPos, serBound),
- // soft limit
- softLimit)) - 2 * METASIZE;
- }
- }
- } while (false);
- } finally {
- spillLock.unlock();
- }
- }
-
- try {
- // serialize key bytes into buffer
- int keystart = bufindex;
- keySerializer.serialize(key);
- if (bufindex < keystart) {
- // wrapped the key; must make contiguous
- bb.shiftBufferedKey();
- keystart = 0;
- }
- // serialize value bytes into buffer
- final int valstart = bufindex;
- valSerializer.serialize(value);
- // It's possible for records to have zero length, i.e. the serializer
- // will perform no writes. To ensure that the boundary conditions are
- // checked and that the kvindex invariant is maintained, perform a
- // zero-length write into the buffer. The logic monitoring this could be
- // moved into collect, but this is cleaner and inexpensive. For now, it
- // is acceptable.
- bb.write(b0, 0, 0);
-
- // the record must be marked after the preceding write, as the metadata
- // for this record are not yet written
- int valend = bb.markRecord();
-
- mapOutputRecordCounter.increment(1);
- mapOutputByteCounter.increment(
- distanceTo(keystart, valend, bufvoid));
-
- // write accounting info
- kvmeta.put(kvindex + INDEX, kvindex);
- kvmeta.put(kvindex + PARTITION, partition);
- kvmeta.put(kvindex + KEYSTART, keystart);
- kvmeta.put(kvindex + VALSTART, valstart);
- // advance kvindex
- kvindex = (kvindex - NMETA + kvmeta.capacity()) % kvmeta.capacity();
- } catch (MapBufferTooSmallException e) {
- LOG.info("Record too large for in-memory buffer: " + e.getMessage());
- spillSingleRecord(key, value, partition);
- mapOutputRecordCounter.increment(1);
- return;
- }
- }
-
- /**
- * Set the point from which meta and serialization data expand. The meta
- * indices are aligned with the buffer, so metadata never spans the ends of
- * the circular buffer.
- */
- private void setEquator(int pos) {
- equator = pos;
- // set index prior to first entry, aligned at meta boundary
- final int aligned = pos - (pos % METASIZE);
- kvindex =
- ((aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
- if (LOG.isInfoEnabled()) {
- LOG.info("(EQUATOR) " + pos + " kvi " + kvindex +
- "(" + (kvindex * 4) + ")");
- }
- }
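-
-    // Illustrative sketch (hypothetical helper, not part of the original
-    // class): the equator is aligned down to a METASIZE boundary before
-    // the first metadata block is placed. With METASIZE == 16, position
-    // 100 aligns to 96.
-    private int demoAlignedEquator(int pos) {
-      return pos - (pos % METASIZE);   // demoAlignedEquator(100) == 96
-    }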
-
- /**
- * The spill is complete, so set the buffer and meta indices to be equal to
- * the new equator to free space for continuing collection. Note that when
- * kvindex == kvend == kvstart, the buffer is empty.
- */
- private void resetSpill() {
- final int e = equator;
- bufstart = bufend = e;
- final int aligned = e - (e % METASIZE);
- // set start/end to point to first meta record
- kvstart = kvend =
- ((aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
- if (LOG.isInfoEnabled()) {
- LOG.info("(RESET) equator " + e + " kv " + kvstart + "(" +
- (kvstart * 4) + ")" + " kvi " + kvindex + "(" + (kvindex * 4) + ")");
- }
- }
-
- /**
- * Compute the distance in bytes between two indices in the serialization
- * buffer.
- * @see #distanceTo(int,int,int)
- */
- final int distanceTo(final int i, final int j) {
- return distanceTo(i, j, kvbuffer.length);
- }
-
- /**
- * Compute the distance between two indices in the circular buffer given the
- * max distance.
- */
- int distanceTo(final int i, final int j, final int mod) {
- return i <= j
- ? j - i
- : mod - i + j;
- }
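-
-    // Illustrative sketch (hypothetical helper, not part of the original
-    // class): distanceTo measures the forward walk from i to j around the
-    // circular buffer.
-    private void demoDistance() {
-      assert distanceTo(2, 6, 10) == 4;   // forward, no wrap
-      assert distanceTo(6, 2, 10) == 6;   // wraps past the end: 10 - 6 + 2
-    }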
-
-    /**
-     * For the given meta position, return the dereferenced position in the
-     * integer array. Each meta block contains several integers describing a
-     * record in its serialized form, but the INDEX int is a level of
-     * indirection rather than a description of the adjacent metadata: the
-     * sort swaps only INDEX values, so the INDEX stored at metapos may point
-     * to the metadata block at some other position metapos + k, and it is
-     * that block which holds the offsets of the associated serialized
-     * record.
-     */
- int offsetFor(int metapos) {
- return kvmeta.get(metapos * NMETA + INDEX);
- }
-
- /**
-     * Compare the logical indices i and j, taken mod the metadata capacity.
- * Compare by partition, then by key.
- * @see IndexedSortable#compare
- */
- public int compare(final int mi, final int mj) {
- final int kvi = offsetFor(mi % maxRec);
- final int kvj = offsetFor(mj % maxRec);
- final int kvip = kvmeta.get(kvi + PARTITION);
- final int kvjp = kvmeta.get(kvj + PARTITION);
- // sort by partition
- if (kvip != kvjp) {
- return kvip - kvjp;
- }
- // sort by key
- return comparator.compare(kvbuffer,
- kvmeta.get(kvi + KEYSTART),
- kvmeta.get(kvi + VALSTART) - kvmeta.get(kvi + KEYSTART),
- kvbuffer,
- kvmeta.get(kvj + KEYSTART),
- kvmeta.get(kvj + VALSTART) - kvmeta.get(kvj + KEYSTART));
- }
-
- /**
-     * Swap the logical indices i and j, taken mod the metadata capacity.
- * @see IndexedSortable#swap
- */
- public void swap(final int mi, final int mj) {
- final int kvi = (mi % maxRec) * NMETA + INDEX;
- final int kvj = (mj % maxRec) * NMETA + INDEX;
- int tmp = kvmeta.get(kvi);
- kvmeta.put(kvi, kvmeta.get(kvj));
- kvmeta.put(kvj, tmp);
- }
-
- /**
-     * Inner stream, fed by the key/value serializers, whose writes block
-     * while a spill is in progress and the collection buffer has no room.
- */
- protected class BlockingBuffer extends DataOutputStream {
-
- public BlockingBuffer() {
- super(new Buffer());
- }
-
-      /**
-       * Mark the end of a record. This is required so that the spill can
-       * be cut at a record boundary.
-       */
- public int markRecord() {
- bufmark = bufindex;
- return bufindex;
- }
-
- /**
- * Set position from last mark to end of writable buffer, then rewrite
- * the data between last mark and kvindex.
- * This handles a special case where the key wraps around the buffer.
- * If the key is to be passed to a RawComparator, then it must be
- * contiguous in the buffer. This recopies the data in the buffer back
- * into itself, but starting at the beginning of the buffer. Note that
- * this method should <b>only</b> be called immediately after detecting
- * this condition. To call it at any other time is undefined and would
- * likely result in data loss or corruption.
- * @see #markRecord()
- */
- protected void shiftBufferedKey() throws IOException {
- // spillLock unnecessary; both kvend and kvindex are current
- int headbytelen = bufvoid - bufmark;
- bufvoid = bufmark;
- final int kvbidx = 4 * kvindex;
- final int kvbend = 4 * kvend;
- final int avail =
- Math.min(distanceTo(0, kvbidx), distanceTo(0, kvbend));
- if (bufindex + headbytelen < avail) {
- System.arraycopy(kvbuffer, 0, kvbuffer, headbytelen, bufindex);
- System.arraycopy(kvbuffer, bufvoid, kvbuffer, 0, headbytelen);
- bufindex += headbytelen;
- bufferRemaining -= kvbuffer.length - bufvoid;
- } else {
- byte[] keytmp = new byte[bufindex];
- System.arraycopy(kvbuffer, 0, keytmp, 0, bufindex);
- bufindex = 0;
- out.write(kvbuffer, bufmark, headbytelen);
- out.write(keytmp);
- }
- }
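-
-      // Illustration with made-up sizes (not from the original source): if
-      // a 10-byte key starts at byte 95 of a 100-byte buffer, 5 bytes land
-      // at 95..99 and 5 wrap to 0..4. shiftBufferedKey() copies the tail
-      // (95..99) down to offset 0 and the wrapped head after it, making the
-      // key contiguous at 0..9; bufvoid is set to the old mark so readers
-      // stop before the stale tail bytes.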
- }
-
- public class Buffer extends OutputStream {
- private final byte[] scratch = new byte[1];
-
- @Override
- public void write(int v)
- throws IOException {
- scratch[0] = (byte)v;
- write(scratch, 0, 1);
- }
-
- /**
- * Attempt to write a sequence of bytes to the collection buffer.
- * This method will block if the spill thread is running and it
- * cannot write.
-       * @throws MapBufferTooSmallException if the record is too large to
-       * serialize into the collection buffer.
- */
- @Override
- public void write(byte b[], int off, int len)
- throws IOException {
- // must always verify the invariant that at least METASIZE bytes are
- // available beyond kvindex, even when len == 0
- bufferRemaining -= len;
- if (bufferRemaining <= 0) {
- // writing these bytes could exhaust available buffer space or fill
- // the buffer to soft limit. check if spill or blocking are necessary
- boolean blockwrite = false;
- spillLock.lock();
- try {
- do {
- checkSpillException();
-
- final int kvbidx = 4 * kvindex;
- final int kvbend = 4 * kvend;
- // ser distance to key index
- final int distkvi = distanceTo(bufindex, kvbidx);
- // ser distance to spill end index
- final int distkve = distanceTo(bufindex, kvbend);
-
- // if kvindex is closer than kvend, then a spill is neither in
- // progress nor complete and reset since the lock was held. The
- // write should block only if there is insufficient space to
- // complete the current write, write the metadata for this record,
- // and write the metadata for the next record. If kvend is closer,
- // then the write should block if there is too little space for
- // either the metadata or the current write. Note that collect
- // ensures its metadata requirement with a zero-length write
- blockwrite = distkvi <= distkve
- ? distkvi <= len + 2 * METASIZE
- : distkve <= len || distanceTo(bufend, kvbidx) < 2 * METASIZE;
-
- if (!spillInProgress) {
- if (blockwrite) {
- if ((kvbend + METASIZE) % kvbuffer.length !=
- equator - (equator % METASIZE)) {
- // spill finished, reclaim space
- // need to use meta exclusively; zero-len rec & 100% spill
- // pcnt would fail
- resetSpill(); // resetSpill doesn't move bufindex, kvindex
- bufferRemaining = Math.min(
- distkvi - 2 * METASIZE,
- softLimit - distanceTo(kvbidx, bufindex)) - len;
- continue;
- }
- // we have records we can spill; only spill if blocked
- if (kvindex != kvend) {
- startSpill();
- // Blocked on this write, waiting for the spill just
- // initiated to finish. Instead of repositioning the marker
- // and copying the partial record, we set the record start
- // to be the new equator
- setEquator(bufmark);
- } else {
- // We have no buffered records, and this record is too large
- // to write into kvbuffer. We must spill it directly from
- // collect
- final int size = distanceTo(bufstart, bufindex) + len;
- setEquator(0);
- bufstart = bufend = bufindex = equator;
- kvstart = kvend = kvindex;
- bufvoid = kvbuffer.length;
- throw new MapBufferTooSmallException(size + " bytes");
- }
- }
- }
-
- if (blockwrite) {
- // wait for spill
- try {
- while (spillInProgress) {
- reporter.progress();
- spillDone.await();
- }
- } catch (InterruptedException e) {
- throw new IOException(
- "Buffer interrupted while waiting for the writer", e);
- }
- }
- } while (blockwrite);
- } finally {
- spillLock.unlock();
- }
- }
- // here, we know that we have sufficient space to write
- if (bufindex + len > bufvoid) {
- final int gaplen = bufvoid - bufindex;
- System.arraycopy(b, off, kvbuffer, bufindex, gaplen);
- len -= gaplen;
- off += gaplen;
- bufindex = 0;
- }
- System.arraycopy(b, off, kvbuffer, bufindex, len);
- bufindex += len;
- }
- }
-
- public void flush() throws IOException, ClassNotFoundException,
- InterruptedException {
- LOG.info("Starting flush of map output");
- spillLock.lock();
- try {
- while (spillInProgress) {
- reporter.progress();
- spillDone.await();
- }
- checkSpillException();
-
- final int kvbend = 4 * kvend;
- if ((kvbend + METASIZE) % kvbuffer.length !=
- equator - (equator % METASIZE)) {
- // spill finished
- resetSpill();
- }
- if (kvindex != kvend) {
- kvend = (kvindex + NMETA) % kvmeta.capacity();
- bufend = bufmark;
- if (LOG.isInfoEnabled()) {
- LOG.info("Spilling map output");
- LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
- "; bufvoid = " + bufvoid);
- LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
- "); kvend = " + kvend + "(" + (kvend * 4) +
- "); length = " + (distanceTo(kvend, kvstart,
- kvmeta.capacity()) + 1) + "/" + maxRec);
- }
- sortAndSpill();
- }
- } catch (InterruptedException e) {
- throw new IOException("Interrupted while waiting for the writer", e);
- } finally {
- spillLock.unlock();
- }
- assert !spillLock.isHeldByCurrentThread();
-      // shut down the spill thread and wait for it to exit. Since the
-      // preceding ensures that it is finished with its work (and
-      // sortAndSpill did not throw), we elect to use an interrupt instead
-      // of setting a flag. Allowing this thread to spill concurrently
-      // while the spill thread finishes would be a useful extension, and
-      // would also be a reason to prefer the flag-based approach.
- try {
- spillThread.interrupt();
- spillThread.join();
- } catch (InterruptedException e) {
- throw new IOException("Spill failed", e);
- }
- // release sort buffer before the merge
- kvbuffer = null;
- mergeParts();
- Path outputPath = mapOutputFile.getOutputFile();
- fileOutputByteCounter.increment(rfs.getFileStatus(outputPath).getLen());
- }
-
- public void close() { }
-
- protected class SpillThread extends Thread {
-
- @Override
- public void run() {
- spillLock.lock();
- spillThreadRunning = true;
- try {
- while (true) {
- spillDone.signal();
- while (!spillInProgress) {
- spillReady.await();
- }
- try {
- spillLock.unlock();
- sortAndSpill();
- } catch (Throwable t) {
- sortSpillException = t;
- } finally {
- spillLock.lock();
- if (bufend < bufstart) {
- bufvoid = kvbuffer.length;
- }
- kvstart = kvend;
- bufstart = bufend;
- spillInProgress = false;
- }
- }
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- } finally {
- spillLock.unlock();
- spillThreadRunning = false;
- }
- }
- }
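-
-    // Note: collect() and the spill thread rendezvous through spillLock:
-    // startSpill() signals spillReady to wake this thread, and once
-    // sortAndSpill() completes, the finally block above resets the spill
-    // state and the loop signals spillDone to wake any blocked writers.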
-
- private void checkSpillException() throws IOException {
- final Throwable lspillException = sortSpillException;
- if (lspillException != null) {
- if (lspillException instanceof Error) {
- final String logMsg = "Task " + getTaskID() + " failed : " +
- StringUtils.stringifyException(lspillException);
- reportFatalError(getTaskID(), lspillException, logMsg);
- }
- throw new IOException("Spill failed", lspillException);
- }
- }
-
- private void startSpill() {
- assert !spillInProgress;
- kvend = (kvindex + NMETA) % kvmeta.capacity();
- bufend = bufmark;
- spillInProgress = true;
- if (LOG.isInfoEnabled()) {
- LOG.info("Spilling map output");
- LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
- "; bufvoid = " + bufvoid);
- LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
- "); kvend = " + kvend + "(" + (kvend * 4) +
- "); length = " + (distanceTo(kvend, kvstart,
- kvmeta.capacity()) + 1) + "/" + maxRec);
- }
- spillReady.signal();
- }
-
- private void sortAndSpill() throws IOException, ClassNotFoundException,
- InterruptedException {
- //approximate the length of the output file to be the length of the
- //buffer + header lengths for the partitions
- final long size = (bufend >= bufstart
- ? bufend - bufstart
- : (bufvoid - bufend) + bufstart) +
- partitions * APPROX_HEADER_LENGTH;
- FSDataOutputStream out = null;
- try {
- // create spill file
- final SpillRecord spillRec = new SpillRecord(partitions);
- final Path filename =
- mapOutputFile.getSpillFileForWrite(numSpills, size);
- out = rfs.create(filename);
-
- final int mstart = kvend / NMETA;
- final int mend = 1 + // kvend is a valid record
- (kvstart >= kvend
- ? kvstart
- : kvmeta.capacity() + kvstart) / NMETA;
- sorter.sort(MapOutputBuffer.this, mstart, mend, reporter);
- int spindex = mstart;
- final IndexRecord rec = new IndexRecord();
- final InMemValBytes value = new InMemValBytes();
- for (int i = 0; i < partitions; ++i) {
- IFile.Writer<K, V> writer = null;
- try {
- long segmentStart = out.getPos();
- writer = new Writer<K, V>(job, out, keyClass, valClass, codec,
- spilledRecordsCounter);
- if (combinerRunner == null) {
- // spill directly
- DataInputBuffer key = new DataInputBuffer();
- while (spindex < mend &&
- kvmeta.get(offsetFor(spindex % maxRec) + PARTITION) == i) {
- final int kvoff = offsetFor(spindex % maxRec);
- key.reset(kvbuffer, kvmeta.get(kvoff + KEYSTART),
- (kvmeta.get(kvoff + VALSTART) -
- kvmeta.get(kvoff + KEYSTART)));
- getVBytesForOffset(kvoff, value);
- writer.append(key, value);
- ++spindex;
- }
- } else {
- int spstart = spindex;
- while (spindex < mend &&
- kvmeta.get(offsetFor(spindex % maxRec)
- + PARTITION) == i) {
- ++spindex;
- }
-            // Note: we would like to avoid the combiner if we have fewer
-            // than some threshold of records for a partition
- if (spstart != spindex) {
- combineCollector.setWriter(writer);
- RawKeyValueIterator kvIter =
- new MRResultIterator(spstart, spindex);
- combinerRunner.combine(kvIter, combineCollector);
- }
- }
-
- // close the writer
- writer.close();
-
- // record offsets
- rec.startOffset = segmentStart;
- rec.rawLength = writer.getRawLength();
- rec.partLength = writer.getCompressedLength();
- spillRec.putIndex(rec, i);
-
- writer = null;
- } finally {
- if (null != writer) writer.close();
- }
- }
-
- if (totalIndexCacheMemory >= indexCacheMemoryLimit) {
- // create spill index file
- Path indexFilename =
- mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions
- * MAP_OUTPUT_INDEX_RECORD_LENGTH);
- spillRec.writeToFile(indexFilename, job);
- } else {
- indexCacheList.add(spillRec);
- totalIndexCacheMemory +=
- spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
- }
- LOG.info("Finished spill " + numSpills);
- ++numSpills;
- } finally {
- if (out != null) out.close();
- }
- }
-
- /**
- * Handles the degenerate case where serialization fails to fit in
- * the in-memory buffer, so we must spill the record from collect
- * directly to a spill file. Consider this "losing".
- */
- private void spillSingleRecord(final K key, final V value,
- int partition) throws IOException {
- long size = kvbuffer.length + partitions * APPROX_HEADER_LENGTH;
- FSDataOutputStream out = null;
- try {
- // create spill file
- final SpillRecord spillRec = new SpillRecord(partitions);
- final Path filename =
- mapOutputFile.getSpillFileForWrite(numSpills, size);
- out = rfs.create(filename);
-
- // we don't run the combiner for a single record
- IndexRecord rec = new IndexRecord();
- for (int i = 0; i < partitions; ++i) {
- IFile.Writer<K, V> writer = null;
- try {
- long segmentStart = out.getPos();
- // Create a new codec, don't care!
- writer = new IFile.Writer<K,V>(job, out, keyClass, valClass, codec,
- spilledRecordsCounter);
-
- if (i == partition) {
- final long recordStart = out.getPos();
- writer.append(key, value);
- // Note that our map byte count will not be accurate with
- // compression
- mapOutputByteCounter.increment(out.getPos() - recordStart);
- }
- writer.close();
-
- // record offsets
- rec.startOffset = segmentStart;
- rec.rawLength = writer.getRawLength();
- rec.partLength = writer.getCompressedLength();
- spillRec.putIndex(rec, i);
-
- writer = null;
- } catch (IOException e) {
- if (null != writer) writer.close();
- throw e;
- }
- }
- if (totalIndexCacheMemory >= indexCacheMemoryLimit) {
- // create spill index file
- Path indexFilename =
- mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions
- * MAP_OUTPUT_INDEX_RECORD_LENGTH);
- spillRec.writeToFile(indexFilename, job);
- } else {
- indexCacheList.add(spillRec);
- totalIndexCacheMemory +=
- spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
- }
- ++numSpills;
- } finally {
- if (out != null) out.close();
- }
- }
-
- /**
-     * Given an offset, populate vbytes with the associated set of
-     * serialized value bytes. Should only be called during a spill.
- */
- private void getVBytesForOffset(int kvoff, InMemValBytes vbytes) {
- // get the keystart for the next serialized value to be the end
- // of this value. If this is the last value in the buffer, use bufend
- final int nextindex = kvoff == kvend
- ? bufend
- : kvmeta.get(
- (kvoff - NMETA + kvmeta.capacity() + KEYSTART) % kvmeta.capacity());
- // calculate the length of the value
- int vallen = (nextindex >= kvmeta.get(kvoff + VALSTART))
- ? nextindex - kvmeta.get(kvoff + VALSTART)
- : (bufvoid - kvmeta.get(kvoff + VALSTART)) + nextindex;
- vbytes.reset(kvbuffer, kvmeta.get(kvoff + VALSTART), vallen);
- }
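-
-    // Illustrative sketch (hypothetical values, not part of the original
-    // class): a value that wraps past the end of the buffer. If the value
-    // starts at byte 95, bufvoid is 100, and the next key begins at byte 7,
-    // the value occupies (100 - 95) + 7 == 12 bytes.
-    private int demoWrappedValLen() {
-      final int demoBufvoid = 100, valstart = 95, nextindex = 7;
-      return (nextindex >= valstart)
-          ? nextindex - valstart
-          : (demoBufvoid - valstart) + nextindex;   // == 12
-    }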
-
- /**
-     * Inner class wrapping the raw value bytes, used for appendRaw.
- */
- protected class InMemValBytes extends DataInputBuffer {
- private byte[] buffer;
- private int start;
- private int length;
-
- public void reset(byte[] buffer, int start, int length) {
- this.buffer = buffer;
- this.start = start;
- this.length = length;
-
- if (start + length > bufvoid) {
- this.buffer = new byte[this.length];
- final int taillen = bufvoid - start;
- System.arraycopy(buffer, start, this.buffer, 0, taillen);
- System.arraycopy(buffer, 0, this.buffer, taillen, length-taillen);
- this.start = 0;
- }
-
- super.reset(this.buffer, this.start, this.length);
- }
- }
-
- protected class MRResultIterator implements RawKeyValueIterator {
- private final DataInputBuffer keybuf = new DataInputBuffer();
- private final InMemValBytes vbytes = new InMemValBytes();
- private final int end;
- private int current;
- public MRResultIterator(int start, int end) {
- this.end = end;
- current = start - 1;
- }
- public boolean next() throws IOException {
- return ++current < end;
- }
- public DataInputBuffer getKey() throws IOException {
- final int kvoff = offsetFor(current % maxRec);
- keybuf.reset(kvbuffer, kvmeta.get(kvoff + KEYSTART),
- kvmeta.get(kvoff + VALSTART) - kvmeta.get(kvoff + KEYSTART));
- return keybuf;
- }
- public DataInputBuffer getValue() throws IOException {
- getVBytesForOffset(offsetFor(current % maxRec), vbytes);
- return vbytes;
- }
- public Progress getProgress() {
- return null;
- }
- public void close() { }
- }
-
- private void mergeParts() throws IOException, InterruptedException,
- ClassNotFoundException {
- // get the approximate size of the final output/index files
- long finalOutFileSize = 0;
- long finalIndexFileSize = 0;
- final Path[] filename = new Path[numSpills];
- final TaskAttemptID mapId = getTaskID();
-
- for(int i = 0; i < numSpills; i++) {
- filename[i] = mapOutputFile.getSpillFile(i);
- finalOutFileSize += rfs.getFileStatus(filename[i]).getLen();
- }
- if (numSpills == 1) { //the spill is the final output
- rfs.rename(filename[0],
- mapOutputFile.getOutputFileForWriteInVolume(filename[0]));
- if (indexCacheList.size() == 0) {
- rfs.rename(mapOutputFile.getSpillIndexFile(0),
- mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]));
- } else {
- indexCacheList.get(0).writeToFile(
- mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]), job);
- }
- return;
- }
-
- // read in paged indices
- for (int i = indexCacheList.size(); i < numSpills; ++i) {
- Path indexFileName = mapOutputFile.getSpillIndexFile(i);
- indexCacheList.add(new SpillRecord(indexFileName, job));
- }
-
-    //adjust the length to include the sequence file header
-    //lengths for each partition
- finalOutFileSize += partitions * APPROX_HEADER_LENGTH;
- finalIndexFileSize = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
- Path finalOutputFile =
- mapOutputFile.getOutputFileForWrite(finalOutFileSize);
- Path finalIndexFile =
- mapOutputFile.getOutputIndexFileForWrite(finalIndexFileSize);
-
- //The output stream for the final single output file
- FSDataOutputStream finalOut = rfs.create(finalOutputFile, true, 4096);
-
- if (numSpills == 0) {
- //create dummy files
- IndexRecord rec = new IndexRecord();
- SpillRecord sr = new SpillRecord(partitions);
- try {
- for (int i = 0; i < partitions; i++) {
- long segmentStart = finalOut.getPos();
- Writer<K, V> writer =
- new Writer<K, V>(job, finalOut, keyClass, valClass, codec, null);
- writer.close();
- rec.startOffset = segmentStart;
- rec.rawLength = writer.getRawLength();
- rec.partLength = writer.getCompressedLength();
- sr.putIndex(rec, i);
- }
- sr.writeToFile(finalIndexFile, job);
- } finally {
- finalOut.close();
- }
- return;
- }
- {
- sortPhase.addPhases(partitions); // Divide sort phase into sub-phases
- Merger.considerFinalMergeForProgress();
-
- IndexRecord rec = new IndexRecord();
- final SpillRecord spillRec = new SpillRecord(partitions);
- for (int parts = 0; parts < partitions; parts++) {
- //create the segments to be merged
- List<Segment<K,V>> segmentList =
- new ArrayList<Segment<K, V>>(numSpills);
- for(int i = 0; i < numSpills; i++) {
- IndexRecord indexRecord = indexCacheList.get(i).getIndex(parts);
-
- Segment<K,V> s =
- new Segment<K,V>(job, rfs, filename[i], indexRecord.startOffset,
- indexRecord.partLength, codec, true);
- segmentList.add(i, s);
-
- if (LOG.isDebugEnabled()) {
-            LOG.debug("MapId=" + mapId + " Reducer=" + parts +
-                " Spill=" + i + " (" + indexRecord.startOffset + ", " +
-                indexRecord.rawLength + ", " + indexRecord.partLength + ")");
- }
- }
-
- int mergeFactor = job.getInt(JobContext.IO_SORT_FACTOR, 100);
- // sort the segments only if there are intermediate merges
- boolean sortSegments = segmentList.size() > mergeFactor;
- //merge
- @SuppressWarnings("unchecked")
- RawKeyValueIterator kvIter = Merger.merge(job, rfs,
- keyClass, valClass, codec,
- segmentList, mergeFactor,
- new Path(mapId.toString()),
- job.getOutputKeyComparator(), reporter, sortSegments,
- null, spilledRecordsCounter, sortPhase.phase());
-
- //write merged output to disk
- long segmentStart = finalOut.getPos();
- Writer<K, V> writer =
- new Writer<K, V>(job, finalOut, keyClass, valClass, codec,
- spilledRecordsCounter);
- if (combinerRunner == null || numSpills < minSpillsForCombine) {
- Merger.writeFile(kvIter, writer, reporter, job);
- } else {
- combineCollector.setWriter(writer);
- combinerRunner.combine(kvIter, combineCollector);
- }
-
- //close
- writer.close();
-
- sortPhase.startNextPhase();
-
- // record offsets
- rec.startOffset = segmentStart;
- rec.rawLength = writer.getRawLength();
- rec.partLength = writer.getCompressedLength();
- spillRec.putIndex(rec, parts);
- }
- spillRec.writeToFile(finalIndexFile, job);
- finalOut.close();
- for(int i = 0; i < numSpills; i++) {
- rfs.delete(filename[i],true);
- }
- }
- }
-
- } // MapOutputBuffer
-
- /**
- * Exception indicating that the allocated sort buffer is insufficient
- * to hold the current record.
- */
- @SuppressWarnings("serial")
- private static class MapBufferTooSmallException extends IOException {
- public MapBufferTooSmallException(String s) {
- super(s);
- }
- }
-
-}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/QueueManager.java b/mapreduce/src/java/org/apache/hadoop/mapred/QueueManager.java
deleted file mode 100644
index 326d627..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/QueueManager.java
+++ /dev/null
@@ -1,704 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.mapred.TaskScheduler.QueueRefresher;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.QueueState;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.util.StringUtils;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonGenerationException;
-import org.codehaus.jackson.JsonGenerator;
-
-import java.io.BufferedInputStream;
-import java.io.InputStream;
-import java.io.IOException;
-import java.io.Writer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.List;
-import java.net.URL;
-
-
-/**
- * Class that exposes information about queues maintained by the Hadoop
- * Map/Reduce framework.
- * <p/>
- * The Map/Reduce framework can be configured with one or more queues,
- * depending on the scheduler it is configured with. While some
- * schedulers work with only one queue, others support multiple
- * queues. Some schedulers also support the notion of queues within
- * queues - a feature called hierarchical queues.
- * <p/>
- * Queue names are unique, and are used as keys to look up queues.
- * Hierarchical queues are named by a 'fully qualified name' such as
- * q1:q2:q3, where q2 is a child queue of q1 and q3 is a child queue of q2.
- * <p/>
- * Leaf level queues are queues that contain no queues within them. Jobs
- * can be submitted only to leaf level queues.
- * <p/>
- * Queues can be configured with various properties. Some of these
- * properties are common to all schedulers, and those are handled by this
- * class. Schedulers might also associate several custom properties with
- * queues. These properties are parsed and maintained per queue by the
- * framework. If schedulers need a more complicated structure to maintain
- * configuration per queue, they are free to bypass the facilities
- * provided by the framework and define their own mechanisms. In such cases,
- * it is likely that the name of the queue will be used to relate the
- * common properties of a queue with scheduler specific properties.
- * <p/>
- * Information related to a queue, such as its name, properties, scheduling
- * information and children are exposed by this class via a serializable
- * class called {@link JobQueueInfo}.
- * <p/>
- * Queues are configured in the configuration file mapred-queues.xml.
- * To support backwards compatibility, queues can also be configured
- * in mapred-site.xml. However, when configured in the latter, there is
- * no support for hierarchical queues.
- */
-@InterfaceAudience.Private
-public class QueueManager {
-
- private static final Log LOG = LogFactory.getLog(QueueManager.class);
-
- // Map of a queue name and Queue object
- private Map<String, Queue> leafQueues = new HashMap<String,Queue>();
- private Map<String, Queue> allQueues = new HashMap<String, Queue>();
- public static final String QUEUE_CONF_FILE_NAME = "mapred-queues.xml";
- static final String QUEUE_CONF_DEFAULT_FILE_NAME = "mapred-queues-default.xml";
-
- //Prefix in configuration for queue related keys
- static final String QUEUE_CONF_PROPERTY_NAME_PREFIX = "mapred.queue.";
-
-  // Root of the configured queue hierarchy.
- private Queue root = null;
-
- // represents if job and queue acls are enabled on the mapreduce cluster
- private boolean areAclsEnabled = false;
-
- /**
- * Factory method to create an appropriate instance of a queue
- * configuration parser.
- * <p/>
- * Returns a parser that can parse either the deprecated property
- * style queue configuration in mapred-site.xml, or one that can
- * parse hierarchical queues in mapred-queues.xml. First preference
- * is given to configuration in mapred-site.xml. If no queue
- * configuration is found there, then a parser that can parse
- * configuration in mapred-queues.xml is created.
- *
- * @param conf Configuration instance that determines which parser
- * to use.
- * @return Queue configuration parser
- */
- static QueueConfigurationParser getQueueConfigurationParser(
- Configuration conf, boolean reloadConf, boolean areAclsEnabled) {
- if (conf != null && conf.get(
- DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY) != null) {
- if (reloadConf) {
- conf.reloadConfiguration();
- }
- return new DeprecatedQueueConfigurationParser(conf);
- } else {
- URL xmlInUrl =
- Thread.currentThread().getContextClassLoader()
- .getResource(QUEUE_CONF_FILE_NAME);
- if (xmlInUrl == null) {
- xmlInUrl = Thread.currentThread().getContextClassLoader()
- .getResource(QUEUE_CONF_DEFAULT_FILE_NAME);
- assert xmlInUrl != null; // this should be in our jar
- }
- InputStream stream = null;
- try {
- stream = xmlInUrl.openStream();
- return new QueueConfigurationParser(new BufferedInputStream(stream),
- areAclsEnabled);
- } catch (IOException ioe) {
- throw new RuntimeException("Couldn't open queue configuration at " +
- xmlInUrl, ioe);
- } finally {
- IOUtils.closeStream(stream);
- }
- }
- }
-
- QueueManager() {// acls are disabled
- this(false);
- }
-
- QueueManager(boolean areAclsEnabled) {
- this.areAclsEnabled = areAclsEnabled;
- initialize(getQueueConfigurationParser(null, false, areAclsEnabled));
- }
-
- /**
- * Construct a new QueueManager using configuration specified in the passed
- * in {@link org.apache.hadoop.conf.Configuration} object.
- * <p/>
- * This instance supports queue configuration specified in mapred-site.xml,
- * but without support for hierarchical queues. If no queue configuration
- * is found in mapred-site.xml, it will then look for site configuration
- * in mapred-queues.xml supporting hierarchical queues.
- *
- * @param clusterConf mapreduce cluster configuration
- */
- public QueueManager(Configuration clusterConf) {
- areAclsEnabled = clusterConf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
- initialize(getQueueConfigurationParser(clusterConf, false, areAclsEnabled));
- }
-
- /**
- * Create an instance that supports hierarchical queues, defined in
- * the passed in configuration file.
- * <p/>
-   * This is mainly used for testing purposes and should not be called from
-   * production code.
- *
- * @param confFile File where the queue configuration is found.
- */
- QueueManager(String confFile, boolean areAclsEnabled) {
- this.areAclsEnabled = areAclsEnabled;
- QueueConfigurationParser cp =
- new QueueConfigurationParser(confFile, areAclsEnabled);
- initialize(cp);
- }
-
- /**
- * Initialize the queue-manager with the queue hierarchy specified by the
- * given {@link QueueConfigurationParser}.
- *
-   * @param cp parser whose root defines the queue hierarchy to load
- */
- private void initialize(QueueConfigurationParser cp) {
- this.root = cp.getRoot();
- leafQueues.clear();
- allQueues.clear();
- //At this point we have root populated
- //update data structures leafNodes.
- leafQueues = getRoot().getLeafQueues();
- allQueues.putAll(getRoot().getInnerQueues());
- allQueues.putAll(leafQueues);
-
- LOG.info("AllQueues : " + allQueues + "; LeafQueues : " + leafQueues);
- }
-
- /**
- * Return the set of leaf level queues configured in the system to
- * which jobs are submitted.
- * <p/>
-   * The number of queues configured depends on the scheduler in use. Note
-   * that some schedulers work with only one queue, whereas
- * others can support multiple queues.
- *
- * @return Set of queue names.
- */
- public synchronized Set<String> getLeafQueueNames() {
- return leafQueues.keySet();
- }
-
- /**
- * Return true if the given user is part of the ACL for the given
- * {@link QueueACL} name for the given queue.
- * <p/>
- * An operation is allowed if all users are provided access for this
- * operation, or if either the user or any of the groups specified is
- * provided access.
- *
- * @param queueName Queue on which the operation needs to be performed.
- * @param qACL The queue ACL name to be checked
- * @param ugi The user and groups who wish to perform the operation.
- * @return true if the operation is allowed, false otherwise.
- */
- public synchronized boolean hasAccess(
- String queueName, QueueACL qACL, UserGroupInformation ugi) {
-
- Queue q = leafQueues.get(queueName);
-
- if (q == null) {
- LOG.info("Queue " + queueName + " is not present");
- return false;
- }
-
- if(q.getChildren() != null && !q.getChildren().isEmpty()) {
- LOG.info("Cannot submit job to parent queue " + q.getName());
- return false;
- }
-
- if (!areAclsEnabled()) {
- return true;
- }
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("Checking access for the acl " + toFullPropertyName(queueName,
- qACL.getAclName()) + " for user " + ugi.getShortUserName());
- }
-
- AccessControlList acl = q.getAcls().get(
- toFullPropertyName(queueName, qACL.getAclName()));
- if (acl == null) {
- return false;
- }
-
- // Check if user is part of the ACL
- return acl.isUserAllowed(ugi);
- }
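-
-  // Illustrative sketch (the queue name "default" is hypothetical): a
-  // typical access check before accepting a job submission.
-  private void demoSubmitCheck() throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    if (!hasAccess("default", QueueACL.SUBMIT_JOB, ugi)) {
-      throw new IOException("User " + ugi.getShortUserName() +
-          " cannot submit jobs to queue 'default'");
-    }
-  }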
-
- /**
- * Checks whether the given queue is running or not.
- *
- * @param queueName name of the queue
- * @return true, if the queue is running.
- */
- synchronized boolean isRunning(String queueName) {
- Queue q = leafQueues.get(queueName);
- if (q != null) {
- return q.getState().equals(QueueState.RUNNING);
- }
- return false;
- }
-
- /**
- * Set a generic Object that represents scheduling information relevant
- * to a queue.
- * <p/>
- * A string representation of this Object will be used by the framework
- * to display in user facing applications like the JobTracker web UI and
- * the hadoop CLI.
- *
- * @param queueName queue for which the scheduling information is to be set.
- * @param queueInfo scheduling information for this queue.
- */
- public synchronized void setSchedulerInfo(
- String queueName,
- Object queueInfo) {
- if (allQueues.get(queueName) != null) {
- allQueues.get(queueName).setSchedulingInfo(queueInfo);
- }
- }
-
- /**
- * Return the scheduler information configured for this queue.
- *
- * @param queueName queue for which the scheduling information is required.
- * @return The scheduling information for this queue.
- */
- public synchronized Object getSchedulerInfo(String queueName) {
- if (allQueues.get(queueName) != null) {
- return allQueues.get(queueName).getSchedulingInfo();
- }
- return null;
- }
-
- static final String MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY =
- "Unable to refresh queues because queue-hierarchy changed. "
- + "Retaining existing configuration. ";
-
- static final String MSG_REFRESH_FAILURE_WITH_SCHEDULER_FAILURE =
-    "Scheduler couldn't refresh its queues with the new"
- + " configuration properties. "
- + "Retaining existing configuration throughout the system.";
-
- /**
- * Refresh acls, state and scheduler properties for the configured queues.
- * <p/>
- * This method reloads configuration related to queues, but does not
- * support changes to the list of queues or hierarchy. The expected usage
- * is that an administrator can modify the queue configuration file and
- * fire an admin command to reload queue configuration. If there is a
- * problem in reloading configuration, then this method guarantees that
- * existing queue configuration is untouched and in a consistent state.
- *
- * @param schedulerRefresher
- * @throws IOException when queue configuration file is invalid.
- */
- synchronized void refreshQueues(Configuration conf,
- QueueRefresher schedulerRefresher)
- throws IOException {
-
- // Create a new configuration parser using the passed conf object.
- QueueConfigurationParser cp =
- getQueueConfigurationParser(conf, true, areAclsEnabled);
-
- /*
- * (1) Validate the refresh of properties owned by QueueManager. As of now,
- * while refreshing queue properties, we only check that the hierarchy is
-     * the same w.r.t. queue names, ACLs and state for each queue, and don't
- * support adding new queues or removing old queues
- */
- if (!root.isHierarchySameAs(cp.getRoot())) {
- LOG.warn(MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY);
- throw new IOException(MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY);
- }
-
- /*
- * (2) QueueManager owned properties are validated. Now validate and
- * refresh the properties of scheduler in a single step.
- */
- if (schedulerRefresher != null) {
- try {
- schedulerRefresher.refreshQueues(cp.getRoot().getJobQueueInfo().getChildren());
- } catch (Throwable e) {
- StringBuilder msg =
- new StringBuilder(
- "Scheduler's refresh-queues failed with the exception : "
- + StringUtils.stringifyException(e));
- msg.append("\n");
- msg.append(MSG_REFRESH_FAILURE_WITH_SCHEDULER_FAILURE);
- LOG.error(msg.toString());
- throw new IOException(msg.toString());
- }
- }
-
- /*
- * (3) Scheduler has validated and refreshed its queues successfully, now
- * refresh the properties owned by QueueManager
- */
-
- // First copy the scheduling information recursively into the new
- // queue-hierarchy. This is done to retain old scheduling information. This
- // is done after scheduler refresh and not before it because during refresh,
- // schedulers may wish to change their scheduling info objects too.
- cp.getRoot().copySchedulingInfo(this.root);
-
- // Now switch roots.
- initialize(cp);
-
- LOG.info("Queue configuration is refreshed successfully.");
- }
-
- // this method is for internal use only
- public static final String toFullPropertyName(
- String queue,
- String property) {
- return QUEUE_CONF_PROPERTY_NAME_PREFIX + queue + "." + property;
- }
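-
-  // Illustration (the queue name "alpha" is hypothetical): the fully
-  // qualified property name for a queue ACL is built from the prefix, the
-  // queue name, and the ACL name, e.g.
-  //
-  //   toFullPropertyName("alpha", QueueACL.SUBMIT_JOB.getAclName())
-  //
-  // yields "mapred.queue.alpha." followed by the submit-job ACL name.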
-
- /**
- * Return an array of {@link JobQueueInfo} objects for all the
-   * queues configured in the system.
- *
- * @return array of JobQueueInfo objects.
- */
- synchronized JobQueueInfo[] getJobQueueInfos() {
- ArrayList<JobQueueInfo> queueInfoList = new ArrayList<JobQueueInfo>();
- for (String queue : allQueues.keySet()) {
- JobQueueInfo queueInfo = getJobQueueInfo(queue);
- if (queueInfo != null) {
- queueInfoList.add(queueInfo);
- }
- }
- return queueInfoList.toArray(
- new JobQueueInfo[queueInfoList.size()]);
- }
-
-
- /**
- * Return {@link JobQueueInfo} for a given queue.
- *
- * @param queue name of the queue
- * @return JobQueueInfo for the queue, null if the queue is not found.
- */
- synchronized JobQueueInfo getJobQueueInfo(String queue) {
- if (allQueues.containsKey(queue)) {
- return allQueues.get(queue).getJobQueueInfo();
- }
-
- return null;
- }
-
- /**
- * JobQueueInfo for all the queues.
- * <p/>
-   * Contrib modules can use this data structure to build a hierarchy or to
-   * traverse it. They can also use it to refresh properties when
-   * refreshQueues is invoked.
- *
- * @return a map for easy navigation.
- */
- synchronized Map<String, JobQueueInfo> getJobQueueInfoMapping() {
- Map<String, JobQueueInfo> m = new HashMap<String, JobQueueInfo>();
-
- for (String key : allQueues.keySet()) {
- m.put(key, allQueues.get(key).getJobQueueInfo());
- }
-
- return m;
- }
-
- /**
-   * Generates the array of QueueAclsInfo objects.
-   * <p/>
-   * The array consists of only those queues for which the user has ACLs.
- *
- * @return QueueAclsInfo[]
- * @throws java.io.IOException
- */
- synchronized QueueAclsInfo[] getQueueAcls(UserGroupInformation ugi)
- throws IOException {
-    // List of all QueueAclsInfo objects; this list is returned
- ArrayList<QueueAclsInfo> queueAclsInfolist =
- new ArrayList<QueueAclsInfo>();
- QueueACL[] qAcls = QueueACL.values();
- for (String queueName : leafQueues.keySet()) {
- QueueAclsInfo queueAclsInfo = null;
- ArrayList<String> operationsAllowed = null;
- for (QueueACL qAcl : qAcls) {
- if (hasAccess(queueName, qAcl, ugi)) {
- if (operationsAllowed == null) {
- operationsAllowed = new ArrayList<String>();
- }
- operationsAllowed.add(qAcl.getAclName());
- }
- }
- if (operationsAllowed != null) {
-        // There is at least one operation supported for queue <queueName>,
-        // hence initialize queueAclsInfo
- queueAclsInfo = new QueueAclsInfo(
- queueName, operationsAllowed.toArray
- (new String[operationsAllowed.size()]));
- queueAclsInfolist.add(queueAclsInfo);
- }
- }
- return queueAclsInfolist.toArray(
- new QueueAclsInfo[queueAclsInfolist.size()]);
- }
-
- /**
- * ONLY FOR TESTING - Do not use in production code.
-   * This method is used for setting up leafQueues only.
- * We are not setting the hierarchy here.
- *
- * @param queues
- */
- synchronized void setQueues(Queue[] queues) {
- root.getChildren().clear();
- leafQueues.clear();
- allQueues.clear();
-
- for (Queue queue : queues) {
- root.addChild(queue);
- }
- //At this point we have root populated
- //update data structures leafNodes.
- leafQueues = getRoot().getLeafQueues();
- allQueues.putAll(getRoot().getInnerQueues());
- allQueues.putAll(leafQueues);
- }
-
- /**
- * Return an array of {@link JobQueueInfo} objects for the root
- * queues configured in the system.
- * <p/>
- * Root queues are queues that are at the top-most level in the
- * hierarchy of queues in mapred-queues.xml, or they are the queues
- * configured in the mapred.queue.names key in mapred-site.xml.
- *
- * @return array of JobQueueInfo objects for root level queues.
- */
- JobQueueInfo[] getRootQueues() {
- List<JobQueueInfo> list = getRoot().getJobQueueInfo().getChildren();
- return list.toArray(new JobQueueInfo[list.size()]);
- }
-
-  /**
-   * Get the complete hierarchy of children for the queue
-   * queueName.
-   *
-   * @param queueName name of the queue
-   * @return array of JobQueueInfo objects for the child queues
-   */
- JobQueueInfo[] getChildQueues(String queueName) {
- List<JobQueueInfo> list =
- allQueues.get(queueName).getJobQueueInfo().getChildren();
- if (list != null) {
- return list.toArray(new JobQueueInfo[list.size()]);
- } else {
- return new JobQueueInfo[0];
- }
- }
-
- /**
-   * Used only for testing purposes.
-   * This method is unstable, as refreshQueues would leave this
-   * data structure in an inconsistent state.
-   *
-   * @param queueName name of the queue
-   * @return the Queue object, or null if no such queue exists
- */
- Queue getQueue(String queueName) {
- return this.allQueues.get(queueName);
- }
-
-
- /**
- * Return if ACLs are enabled for the Map/Reduce system
- *
- * @return true if ACLs are enabled.
- */
- boolean areAclsEnabled() {
- return areAclsEnabled;
- }
-
- /**
-   * Used only for testing.
-   *
-   * @return the root queue
- */
- Queue getRoot() {
- return root;
- }
-
- /**
- * Returns the specific queue ACL for the given queue.
- * Returns null if the given queue does not exist or the acl is not
- * configured for that queue.
-   * If ACLs are disabled (mapreduce.cluster.acls.enabled set to false),
-   * returns an ACL that allows all users.
- */
- synchronized AccessControlList getQueueACL(String queueName,
- QueueACL qACL) {
- if (areAclsEnabled) {
- Queue q = leafQueues.get(queueName);
- if (q != null) {
- return q.getAcls().get(toFullPropertyName(
- queueName, qACL.getAclName()));
- }
- else {
- LOG.warn("Queue " + queueName + " is not present.");
- return null;
- }
- }
- return new AccessControlList("*");
- }
-
- /**
-   * Dumps the configuration of the queue hierarchy.
- * @param out the writer object to which dump is written
- * @throws IOException
- */
- static void dumpConfiguration(Writer out,Configuration conf) throws IOException {
- dumpConfiguration(out, null,conf);
- }
-
-  /**
-   * Dumps the configuration of the queue hierarchy from the
-   * given XML file path. It is to be used directly ONLY FOR TESTING.
- * @param out the writer object to which dump is written to.
- * @param configFile the filename of xml file
- * @throws IOException
- */
- static void dumpConfiguration(Writer out, String configFile,
- Configuration conf) throws IOException {
- if (conf != null && conf.get(DeprecatedQueueConfigurationParser.
- MAPRED_QUEUE_NAMES_KEY) != null) {
- return;
- }
-
- JsonFactory dumpFactory = new JsonFactory();
- JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
- QueueConfigurationParser parser;
- boolean aclsEnabled = false;
- if (conf != null) {
- aclsEnabled = conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
- }
- if (configFile != null && !"".equals(configFile)) {
- parser = new QueueConfigurationParser(configFile, aclsEnabled);
- }
- else {
- parser = getQueueConfigurationParser(null, false, aclsEnabled);
- }
- dumpGenerator.writeStartObject();
- dumpGenerator.writeFieldName("queues");
- dumpGenerator.writeStartArray();
- dumpConfiguration(dumpGenerator,parser.getRoot().getChildren());
- dumpGenerator.writeEndArray();
- dumpGenerator.writeEndObject();
- dumpGenerator.flush();
- }
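-
-  // Sketch of the emitted JSON shape (field names taken from the
-  // writeStringField calls below; the values shown are hypothetical):
-  //
-  //   {"queues":[{"name":"default","state":"running",
-  //               "acl_submit_job":" ","acl_administer_jobs":" ",
-  //               "properties":[],"children":[]}]}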
-
- /**
-   * Performs a depth-first traversal and writes the parameters of every
-   * queue in JSON format.
- * @param dumpGenerator JsonGenerator object which takes the dump and flushes
- * to a writer object
- * @param rootQueues the top-level queues
- * @throws JsonGenerationException
- * @throws IOException
- */
- private static void dumpConfiguration(JsonGenerator dumpGenerator,
- Set<Queue> rootQueues) throws JsonGenerationException, IOException {
- for (Queue queue : rootQueues) {
- dumpGenerator.writeStartObject();
- dumpGenerator.writeStringField("name", queue.getName());
- dumpGenerator.writeStringField("state", queue.getState().toString());
- AccessControlList submitJobList = null;
- AccessControlList administerJobsList = null;
- if (queue.getAcls() != null) {
- submitJobList =
- queue.getAcls().get(toFullPropertyName(queue.getName(),
- QueueACL.SUBMIT_JOB.getAclName()));
- administerJobsList =
- queue.getAcls().get(toFullPropertyName(queue.getName(),
- QueueACL.ADMINISTER_JOBS.getAclName()));
- }
- String aclsSubmitJobValue = " ";
- if (submitJobList != null ) {
- aclsSubmitJobValue = submitJobList.getAclString();
- }
- dumpGenerator.writeStringField("acl_submit_job", aclsSubmitJobValue);
- String aclsAdministerValue = " ";
- if (administerJobsList != null) {
- aclsAdministerValue = administerJobsList.getAclString();
- }
- dumpGenerator.writeStringField("acl_administer_jobs",
- aclsAdministerValue);
- dumpGenerator.writeFieldName("properties");
- dumpGenerator.writeStartArray();
- if (queue.getProperties() != null) {
-        for (Map.Entry<Object, Object> property :
- queue.getProperties().entrySet()) {
- dumpGenerator.writeStartObject();
- dumpGenerator.writeStringField("key", (String)property.getKey());
- dumpGenerator.writeStringField("value", (String)property.getValue());
- dumpGenerator.writeEndObject();
- }
- }
- dumpGenerator.writeEndArray();
- Set<Queue> childQueues = queue.getChildren();
- dumpGenerator.writeFieldName("children");
- dumpGenerator.writeStartArray();
- if (childQueues != null && childQueues.size() > 0) {
- dumpConfiguration(dumpGenerator, childQueues);
- }
- dumpGenerator.writeEndArray();
- dumpGenerator.writeEndObject();
- }
- }
-
-}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ReduceTask.java b/mapreduce/src/java/org/apache/hadoop/mapred/ReduceTask.java
deleted file mode 100644
index 5e6822a..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/ReduceTask.java
+++ /dev/null
@@ -1,625 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileSystem.Statistics;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.RawComparator;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
-import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.TaskCounter;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.mapreduce.task.reduce.Shuffle;
-import org.apache.hadoop.util.Progress;
-import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.ReflectionUtils;
-
-/** A Reduce task. */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class ReduceTask extends Task {
-
- static { // register a ctor
- WritableFactories.setFactory
- (ReduceTask.class,
- new WritableFactory() {
- public Writable newInstance() { return new ReduceTask(); }
- });
- }
-
- private static final Log LOG = LogFactory.getLog(ReduceTask.class.getName());
- private int numMaps;
-
- private CompressionCodec codec;
-
-
- {
- getProgress().setStatus("reduce");
- setPhase(TaskStatus.Phase.SHUFFLE); // phase to start with
- }
-
- private Progress copyPhase;
- private Progress sortPhase;
- private Progress reducePhase;
- private Counters.Counter shuffledMapsCounter =
- getCounters().findCounter(TaskCounter.SHUFFLED_MAPS);
- private Counters.Counter reduceShuffleBytes =
- getCounters().findCounter(TaskCounter.REDUCE_SHUFFLE_BYTES);
- private Counters.Counter reduceInputKeyCounter =
- getCounters().findCounter(TaskCounter.REDUCE_INPUT_GROUPS);
- private Counters.Counter reduceInputValueCounter =
- getCounters().findCounter(TaskCounter.REDUCE_INPUT_RECORDS);
- private Counters.Counter reduceOutputCounter =
- getCounters().findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS);
- private Counters.Counter reduceCombineInputCounter =
- getCounters().findCounter(TaskCounter.COMBINE_INPUT_RECORDS);
- private Counters.Counter reduceCombineOutputCounter =
- getCounters().findCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
- private Counters.Counter fileOutputByteCounter =
- getCounters().findCounter(FileOutputFormatCounter.BYTES_WRITTEN);
-
- // A custom comparator for map output files. Here the ordering is determined
- // by the file's size and path. Of two files with the same size but different
- // paths, the first argument is considered smaller than the second; files
- // with the same size and the same path are considered equal.
- private Comparator<FileStatus> mapOutputFileComparator =
- new Comparator<FileStatus>() {
- public int compare(FileStatus a, FileStatus b) {
- if (a.getLen() < b.getLen())
- return -1;
- else if (a.getLen() == b.getLen())
- if (a.getPath().toString().equals(b.getPath().toString()))
- return 0;
- else
- return -1;
- else
- return 1;
- }
- };
-
- // A sorted set for keeping a set of map output files on disk
- private final SortedSet<FileStatus> mapOutputFilesOnDisk =
- new TreeSet<FileStatus>(mapOutputFileComparator);
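To see why the comparator above deliberately returns -1 for distinct paths of equal size (breaking the usual Comparator symmetry contract), here is a minimal, self-contained sketch; FakeStatus and the file names are hypothetical stand-ins for FileStatus and real spill files. A TreeSet keyed on this ordering keeps every distinct on-disk output while still rejecting exact re-insertions:

import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;

public class ComparatorSketch {
  // Hypothetical stand-in for FileStatus, carrying only what the comparator reads.
  static class FakeStatus {
    final long len;
    final String path;
    FakeStatus(long len, String path) { this.len = len; this.path = path; }
  }

  public static void main(String[] args) {
    Comparator<FakeStatus> cmp = (a, b) -> {
      if (a.len < b.len) return -1;
      else if (a.len == b.len)
        return a.path.equals(b.path) ? 0 : -1; // distinct paths never compare equal
      else return 1;
    };
    SortedSet<FakeStatus> onDisk = new TreeSet<>(cmp);
    onDisk.add(new FakeStatus(100, "spill0.out"));
    onDisk.add(new FakeStatus(100, "spill1.out")); // same size, different path: kept
    onDisk.add(new FakeStatus(100, "spill0.out")); // exact duplicate: rejected
    System.out.println(onDisk.size()); // prints 2
  }
}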
-
- public ReduceTask() {
- super();
- }
-
- public ReduceTask(String jobFile, TaskAttemptID taskId,
- int partition, int numMaps, int numSlotsRequired) {
- super(jobFile, taskId, partition, numSlotsRequired);
- this.numMaps = numMaps;
- }
-
- private CompressionCodec initCodec() {
- // check if map-outputs are to be compressed
- if (conf.getCompressMapOutput()) {
- Class<? extends CompressionCodec> codecClass =
- conf.getMapOutputCompressorClass(DefaultCodec.class);
- return ReflectionUtils.newInstance(codecClass, conf);
- }
-
- return null;
- }
-
- @Override
- public TaskRunner createRunner(TaskTracker tracker, TaskInProgress tip)
- throws IOException {
- return new ReduceTaskRunner(tip, tracker, this.conf);
- }
-
- @Override
- public boolean isMapTask() {
- return false;
- }
-
- public int getNumMaps() { return numMaps; }
-
- /**
- * Localize the given JobConf to be specific for this task.
- */
- @Override
- public void localizeConfiguration(JobConf conf) throws IOException {
- super.localizeConfiguration(conf);
- conf.setNumMapTasks(numMaps);
- }
-
- @Override
- public void write(DataOutput out) throws IOException {
- super.write(out);
-
- out.writeInt(numMaps); // write the number of maps
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- super.readFields(in);
-
- numMaps = in.readInt();
- }
-
- // Get the input files for the reducer.
- private Path[] getMapFiles(FileSystem fs, boolean isLocal)
- throws IOException {
- List<Path> fileList = new ArrayList<Path>();
- if (isLocal) {
- // for local jobs
- for(int i = 0; i < numMaps; ++i) {
- fileList.add(mapOutputFile.getInputFile(i));
- }
- } else {
- // for non local jobs
- for (FileStatus filestatus : mapOutputFilesOnDisk) {
- fileList.add(filestatus.getPath());
- }
- }
- return fileList.toArray(new Path[0]);
- }
-
- private class ReduceValuesIterator<KEY,VALUE>
- extends ValuesIterator<KEY,VALUE> {
- public ReduceValuesIterator (RawKeyValueIterator in,
- RawComparator<KEY> comparator,
- Class<KEY> keyClass,
- Class<VALUE> valClass,
- Configuration conf, Progressable reporter)
- throws IOException {
- super(in, comparator, keyClass, valClass, conf, reporter);
- }
-
- @Override
- public VALUE next() {
- reduceInputValueCounter.increment(1);
- return moveToNext();
- }
-
- protected VALUE moveToNext() {
- return super.next();
- }
-
- public void informReduceProgress() {
- reducePhase.set(super.in.getProgress().getProgress()); // update progress
- reporter.progress();
- }
- }
-
- private class SkippingReduceValuesIterator<KEY,VALUE>
- extends ReduceValuesIterator<KEY,VALUE> {
- private SkipRangeIterator skipIt;
- private TaskUmbilicalProtocol umbilical;
- private Counters.Counter skipGroupCounter;
- private Counters.Counter skipRecCounter;
- private long grpIndex = -1;
- private Class<KEY> keyClass;
- private Class<VALUE> valClass;
- private SequenceFile.Writer skipWriter;
- private boolean toWriteSkipRecs;
- private boolean hasNext;
- private TaskReporter reporter;
-
- public SkippingReduceValuesIterator(RawKeyValueIterator in,
- RawComparator<KEY> comparator, Class<KEY> keyClass,
- Class<VALUE> valClass, Configuration conf, TaskReporter reporter,
- TaskUmbilicalProtocol umbilical) throws IOException {
- super(in, comparator, keyClass, valClass, conf, reporter);
- this.umbilical = umbilical;
- this.skipGroupCounter =
- reporter.getCounter(TaskCounter.REDUCE_SKIPPED_GROUPS);
- this.skipRecCounter =
- reporter.getCounter(TaskCounter.REDUCE_SKIPPED_RECORDS);
- this.toWriteSkipRecs = toWriteSkipRecs() &&
- SkipBadRecords.getSkipOutputPath(conf)!=null;
- this.keyClass = keyClass;
- this.valClass = valClass;
- this.reporter = reporter;
- skipIt = getSkipRanges().skipRangeIterator();
- mayBeSkip();
- }
-
- public void nextKey() throws IOException {
- super.nextKey();
- mayBeSkip();
- }
-
- public boolean more() {
- return super.more() && hasNext;
- }
-
- private void mayBeSkip() throws IOException {
- hasNext = skipIt.hasNext();
- if(!hasNext) {
- LOG.warn("Further groups got skipped.");
- return;
- }
- grpIndex++;
- long nextGrpIndex = skipIt.next();
- long skip = 0;
- long skipRec = 0;
- while(grpIndex<nextGrpIndex && super.more()) {
- while (hasNext()) {
- VALUE value = moveToNext();
- if(toWriteSkipRecs) {
- writeSkippedRec(getKey(), value);
- }
- skipRec++;
- }
- super.nextKey();
- grpIndex++;
- skip++;
- }
-
- //close the skip writer once all the ranges are skipped
- if(skip>0 && skipIt.skippedAllRanges() && skipWriter!=null) {
- skipWriter.close();
- }
- skipGroupCounter.increment(skip);
- skipRecCounter.increment(skipRec);
- reportNextRecordRange(umbilical, grpIndex);
- }
-
- @SuppressWarnings("unchecked")
- private void writeSkippedRec(KEY key, VALUE value) throws IOException{
- if(skipWriter==null) {
- Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
- Path skipFile = new Path(skipDir, getTaskID().toString());
- skipWriter = SequenceFile.createWriter(
- skipFile.getFileSystem(conf), conf, skipFile,
- keyClass, valClass,
- CompressionType.BLOCK, reporter);
- }
- skipWriter.append(key, value);
- }
- }
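The mayBeSkip() bookkeeping above is easiest to follow with concrete indices. A hedged, self-contained walkthrough, assuming the skip-range iterator yields the indices of the key groups that should be processed (all names here are illustrative, and the drained values are elided):

import java.util.Arrays;
import java.util.Iterator;

public class SkipWalkthrough {
  public static void main(String[] args) {
    // Indices of the key groups that SHOULD be processed; everything
    // between two kept indices is skipped, mirroring mayBeSkip().
    Iterator<Long> skipIt = Arrays.asList(0L, 1L, 5L, 6L).iterator();
    long grpIndex = -1;
    while (skipIt.hasNext()) {
      grpIndex++; // advance to the group we are standing on
      long nextGrpIndex = skipIt.next();
      long skipped = 0;
      while (grpIndex < nextGrpIndex) { // consumes groups 2..4 on the third pass
        grpIndex++; // the real code also drains the group's values and calls nextKey()
        skipped++;
      }
      System.out.println("processing group " + grpIndex + ", skipped " + skipped);
    }
  }
}

Running this prints "skipped 3" on the third pass, matching the counts mayBeSkip() feeds into skipGroupCounter before reporting the next record range.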
-
- @Override
- @SuppressWarnings("unchecked")
- public void run(JobConf job, final TaskUmbilicalProtocol umbilical)
- throws IOException, InterruptedException, ClassNotFoundException {
- job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
-
- if (isMapOrReduce()) {
- copyPhase = getProgress().addPhase("copy");
- sortPhase = getProgress().addPhase("sort");
- reducePhase = getProgress().addPhase("reduce");
- }
- // start thread that will handle communication with parent
- TaskReporter reporter = startReporter(umbilical);
-
- boolean useNewApi = job.getUseNewReducer();
- initialize(job, getJobID(), reporter, useNewApi);
-
- // check if it is a cleanupJobTask
- if (jobCleanup) {
- runJobCleanupTask(umbilical, reporter);
- return;
- }
- if (jobSetup) {
- runJobSetupTask(umbilical, reporter);
- return;
- }
- if (taskCleanup) {
- runTaskCleanupTask(umbilical, reporter);
- return;
- }
-
- // Initialize the codec
- codec = initCodec();
- RawKeyValueIterator rIter = null;
- boolean isLocal = "local".equals(job.get(JTConfig.JT_IPC_ADDRESS, "local"));
- if (!isLocal) {
- Class combinerClass = conf.getCombinerClass();
- CombineOutputCollector combineCollector =
- (null != combinerClass) ?
- new CombineOutputCollector(reduceCombineOutputCounter, reporter, conf) : null;
-
- Shuffle shuffle =
- new Shuffle(getTaskID(), job, FileSystem.getLocal(job), umbilical,
- super.lDirAlloc, reporter, codec,
- combinerClass, combineCollector,
- spilledRecordsCounter, reduceCombineInputCounter,
- shuffledMapsCounter,
- reduceShuffleBytes, failedShuffleCounter,
- mergedMapOutputsCounter,
- taskStatus, copyPhase, sortPhase, this,
- mapOutputFile);
- rIter = shuffle.run();
- } else {
- // local job runner doesn't have a copy phase
- copyPhase.complete();
- final FileSystem rfs = FileSystem.getLocal(job).getRaw();
- rIter = Merger.merge(job, rfs, job.getMapOutputKeyClass(),
- job.getMapOutputValueClass(), codec,
- getMapFiles(rfs, true),
- !conf.getKeepFailedTaskFiles(),
- job.getInt(JobContext.IO_SORT_FACTOR, 100),
- new Path(getTaskID().toString()),
- job.getOutputKeyComparator(),
- reporter, spilledRecordsCounter, null, null);
- }
- // free up the data structures
- mapOutputFilesOnDisk.clear();
-
- sortPhase.complete(); // sort is complete
- setPhase(TaskStatus.Phase.REDUCE);
- statusUpdate(umbilical);
- Class keyClass = job.getMapOutputKeyClass();
- Class valueClass = job.getMapOutputValueClass();
- RawComparator comparator = job.getOutputValueGroupingComparator();
-
- if (useNewApi) {
- runNewReducer(job, umbilical, reporter, rIter, comparator,
- keyClass, valueClass);
- } else {
- runOldReducer(job, umbilical, reporter, rIter, comparator,
- keyClass, valueClass);
- }
- done(umbilical, reporter);
- }
-
- @SuppressWarnings("unchecked")
- private <INKEY,INVALUE,OUTKEY,OUTVALUE>
- void runOldReducer(JobConf job,
- TaskUmbilicalProtocol umbilical,
- final TaskReporter reporter,
- RawKeyValueIterator rIter,
- RawComparator<INKEY> comparator,
- Class<INKEY> keyClass,
- Class<INVALUE> valueClass) throws IOException {
- Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer =
- ReflectionUtils.newInstance(job.getReducerClass(), job);
- // make output collector
- String finalName = getOutputName(getPartition());
-
- final RecordWriter<OUTKEY, OUTVALUE> out = new OldTrackingRecordWriter<OUTKEY, OUTVALUE>(
- this, job, reporter, finalName);
-
- OutputCollector<OUTKEY,OUTVALUE> collector =
- new OutputCollector<OUTKEY,OUTVALUE>() {
- public void collect(OUTKEY key, OUTVALUE value)
- throws IOException {
- out.write(key, value);
- // indicate that progress update needs to be sent
- reporter.progress();
- }
- };
-
- // apply reduce function
- try {
- //increment processed counter only if skipping feature is enabled
- boolean incrProcCount = SkipBadRecords.getReducerMaxSkipGroups(job)>0 &&
- SkipBadRecords.getAutoIncrReducerProcCount(job);
-
- ReduceValuesIterator<INKEY,INVALUE> values = isSkipping() ?
- new SkippingReduceValuesIterator<INKEY,INVALUE>(rIter,
- comparator, keyClass, valueClass,
- job, reporter, umbilical) :
- new ReduceValuesIterator<INKEY,INVALUE>(rIter,
- job.getOutputValueGroupingComparator(), keyClass, valueClass,
- job, reporter);
- values.informReduceProgress();
- while (values.more()) {
- reduceInputKeyCounter.increment(1);
- reducer.reduce(values.getKey(), values, collector, reporter);
- if(incrProcCount) {
- reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
- SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS, 1);
- }
- values.nextKey();
- values.informReduceProgress();
- }
-
- //Clean up: repeated in catch block below
- reducer.close();
- out.close(reporter);
- //End of clean up.
- } catch (IOException ioe) {
- try {
- reducer.close();
- } catch (IOException ignored) {}
-
- try {
- out.close(reporter);
- } catch (IOException ignored) {}
-
- throw ioe;
- }
- }
-
- static class OldTrackingRecordWriter<K, V> implements RecordWriter<K, V> {
-
- private final RecordWriter<K, V> real;
- private final org.apache.hadoop.mapred.Counters.Counter reduceOutputCounter;
- private final org.apache.hadoop.mapred.Counters.Counter fileOutputByteCounter;
- private final Statistics fsStats;
-
- @SuppressWarnings({ "deprecation", "unchecked" })
- public OldTrackingRecordWriter(ReduceTask reduce, JobConf job,
- TaskReporter reporter, String finalName) throws IOException {
- this.reduceOutputCounter = reduce.reduceOutputCounter;
- this.fileOutputByteCounter = reduce.fileOutputByteCounter;
- Statistics matchedStats = null;
- if (job.getOutputFormat() instanceof FileOutputFormat) {
- matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
- }
- fsStats = matchedStats;
-
- FileSystem fs = FileSystem.get(job);
- long bytesOutPrev = getOutputBytes(fsStats);
- this.real = job.getOutputFormat().getRecordWriter(fs, job, finalName,
- reporter);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- }
-
- @Override
- public void write(K key, V value) throws IOException {
- long bytesOutPrev = getOutputBytes(fsStats);
- real.write(key, value);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- reduceOutputCounter.increment(1);
- }
-
- @Override
- public void close(Reporter reporter) throws IOException {
- long bytesOutPrev = getOutputBytes(fsStats);
- real.close(reporter);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- }
-
- private long getOutputBytes(Statistics stats) {
- return stats == null ? 0 : stats.getBytesWritten();
- }
- }
-
- static class NewTrackingRecordWriter<K,V>
- extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
- private final org.apache.hadoop.mapreduce.RecordWriter<K,V> real;
- private final org.apache.hadoop.mapreduce.Counter outputRecordCounter;
- private final org.apache.hadoop.mapreduce.Counter fileOutputByteCounter;
- private final Statistics fsStats;
-
- @SuppressWarnings("unchecked")
- NewTrackingRecordWriter(ReduceTask reduce,
- org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
- throws InterruptedException, IOException {
- this.outputRecordCounter = reduce.reduceOutputCounter;
- this.fileOutputByteCounter = reduce.fileOutputByteCounter;
-
- Statistics matchedStats = null;
- if (reduce.outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
- matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
- .getOutputPath(taskContext), taskContext.getConfiguration());
- }
-
- fsStats = matchedStats;
-
- long bytesOutPrev = getOutputBytes(fsStats);
- this.real = (org.apache.hadoop.mapreduce.RecordWriter<K, V>) reduce.outputFormat
- .getRecordWriter(taskContext);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- }
-
- @Override
- public void close(TaskAttemptContext context) throws IOException,
- InterruptedException {
- long bytesOutPrev = getOutputBytes(fsStats);
- real.close(context);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- }
-
- @Override
- public void write(K key, V value) throws IOException, InterruptedException {
- long bytesOutPrev = getOutputBytes(fsStats);
- real.write(key,value);
- long bytesOutCurr = getOutputBytes(fsStats);
- fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
- outputRecordCounter.increment(1);
- }
-
- private long getOutputBytes(Statistics stats) {
- return stats == null ? 0 : stats.getBytesWritten();
- }
- }
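Both tracking writers above rely on the same snapshot-delta pattern: read the filesystem's cumulative written-bytes statistic before and after delegating, and charge only the difference to the task counter, since the Statistics object is shared by every stream on that filesystem. A minimal sketch of the pattern, with CumulativeStats standing in for FileSystem.Statistics and a plain long for the counter:

import java.util.concurrent.atomic.AtomicLong;

public class DeltaSketch {
  // Stand-in for FileSystem.Statistics: a cumulative, shared byte counter.
  static class CumulativeStats { final AtomicLong bytes = new AtomicLong(); }

  static final CumulativeStats fsStats = new CumulativeStats();
  static long fileOutputByteCounter = 0; // stand-in for the task counter

  static void write(byte[] record) {
    long bytesOutPrev = fsStats.bytes.get();     // snapshot before delegating
    fsStats.bytes.addAndGet(record.length);      // the wrapped writer does the real I/O here
    long bytesOutCurr = fsStats.bytes.get();     // snapshot after
    fileOutputByteCounter += bytesOutCurr - bytesOutPrev; // charge only this call's delta
  }

  public static void main(String[] args) {
    write(new byte[10]);
    write(new byte[32]);
    System.out.println(fileOutputByteCounter); // 42
  }
}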
-
- @SuppressWarnings("unchecked")
- private <INKEY,INVALUE,OUTKEY,OUTVALUE>
- void runNewReducer(JobConf job,
- final TaskUmbilicalProtocol umbilical,
- final TaskReporter reporter,
- RawKeyValueIterator rIter,
- RawComparator<INKEY> comparator,
- Class<INKEY> keyClass,
- Class<INVALUE> valueClass
- ) throws IOException,InterruptedException,
- ClassNotFoundException {
- // wrap value iterator to report progress.
- final RawKeyValueIterator rawIter = rIter;
- rIter = new RawKeyValueIterator() {
- public void close() throws IOException {
- rawIter.close();
- }
- public DataInputBuffer getKey() throws IOException {
- return rawIter.getKey();
- }
- public Progress getProgress() {
- return rawIter.getProgress();
- }
- public DataInputBuffer getValue() throws IOException {
- return rawIter.getValue();
- }
- public boolean next() throws IOException {
- boolean ret = rawIter.next();
- reporter.setProgress(rawIter.getProgress().getProgress());
- return ret;
- }
- };
- // make a task context so we can get the classes
- org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
- new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job,
- getTaskID(), reporter);
- // make a reducer
- org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer =
- (org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>)
- ReflectionUtils.newInstance(taskContext.getReducerClass(), job);
- org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> trackedRW =
- new NewTrackingRecordWriter<OUTKEY, OUTVALUE>(this, taskContext);
- job.setBoolean("mapred.skip.on", isSkipping());
- job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
- org.apache.hadoop.mapreduce.Reducer.Context
- reducerContext = createReduceContext(reducer, job, getTaskID(),
- rIter, reduceInputKeyCounter,
- reduceInputValueCounter,
- trackedRW,
- committer,
- reporter, comparator, keyClass,
- valueClass);
- reducer.run(reducerContext);
- trackedRW.close(reducerContext);
- }
-}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/SpillRecord.java b/mapreduce/src/java/org/apache/hadoop/mapred/SpillRecord.java
deleted file mode 100644
index bb91e4f..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/SpillRecord.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.LongBuffer;
-import java.util.zip.CheckedInputStream;
-import java.util.zip.CheckedOutputStream;
-import java.util.zip.Checksum;
-
-import org.apache.hadoop.fs.ChecksumException;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.SecureIOUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.PureJavaCrc32;
-
-import static org.apache.hadoop.mapred.MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH;
-
-class SpillRecord {
-
- /** Backing store */
- private final ByteBuffer buf;
- /** View of backing storage as longs */
- private final LongBuffer entries;
-
- public SpillRecord(int numPartitions) {
- buf = ByteBuffer.allocate(
- numPartitions * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH);
- entries = buf.asLongBuffer();
- }
-
- public SpillRecord(Path indexFileName, JobConf job) throws IOException {
- this(indexFileName, job, null);
- }
-
- public SpillRecord(Path indexFileName, JobConf job, String expectedIndexOwner)
- throws IOException {
- this(indexFileName, job, new PureJavaCrc32(), expectedIndexOwner);
- }
-
- public SpillRecord(Path indexFileName, JobConf job, Checksum crc,
- String expectedIndexOwner)
- throws IOException {
-
- final FileSystem rfs = FileSystem.getLocal(job).getRaw();
- final DataInputStream in =
- new DataInputStream(SecureIOUtils.openForRead(
- new File(indexFileName.toUri().getPath()), expectedIndexOwner, null));
- try {
- final long length = rfs.getFileStatus(indexFileName).getLen();
- final int partitions = (int) length / MAP_OUTPUT_INDEX_RECORD_LENGTH;
- final int size = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
-
- buf = ByteBuffer.allocate(size);
- if (crc != null) {
- crc.reset();
- CheckedInputStream chk = new CheckedInputStream(in, crc);
- IOUtils.readFully(chk, buf.array(), 0, size);
- if (chk.getChecksum().getValue() != in.readLong()) {
- throw new ChecksumException("Checksum error reading spill index: " +
- indexFileName, -1);
- }
- } else {
- IOUtils.readFully(in, buf.array(), 0, size);
- }
- entries = buf.asLongBuffer();
- } finally {
- in.close();
- }
- }
-
- /**
- * Return number of IndexRecord entries in this spill.
- */
- public int size() {
- return entries.capacity() / (MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8);
- }
-
- /**
- * Get spill offsets for given partition.
- */
- public IndexRecord getIndex(int partition) {
- final int pos = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8;
- return new IndexRecord(entries.get(pos), entries.get(pos + 1),
- entries.get(pos + 2));
- }
-
- /**
- * Set spill offsets for given partition.
- */
- public void putIndex(IndexRecord rec, int partition) {
- final int pos = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8;
- entries.put(pos, rec.startOffset);
- entries.put(pos + 1, rec.rawLength);
- entries.put(pos + 2, rec.partLength);
- }
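The offset arithmetic in getIndex/putIndex implies three longs per partition, i.e. it assumes MAP_OUTPUT_INDEX_RECORD_LENGTH is 24 bytes: partition p's record starts at long position p * 24 / 8, so partition 2 occupies entries 6, 7 and 8 (startOffset, rawLength, partLength). A hedged, self-contained sketch of that layout:

import java.nio.ByteBuffer;
import java.nio.LongBuffer;

public class IndexLayoutSketch {
  static final int RECORD_LENGTH = 24; // assumed value of MAP_OUTPUT_INDEX_RECORD_LENGTH

  public static void main(String[] args) {
    int partitions = 4;
    ByteBuffer buf = ByteBuffer.allocate(partitions * RECORD_LENGTH);
    LongBuffer entries = buf.asLongBuffer();

    int partition = 2;
    int pos = partition * RECORD_LENGTH / 8; // = 6: first long of partition 2's record
    entries.put(pos, 1024L);     // startOffset
    entries.put(pos + 1, 512L);  // rawLength
    entries.put(pos + 2, 300L);  // partLength

    System.out.println(entries.get(6) + " " + entries.get(7) + " " + entries.get(8));
  }
}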
-
- /**
- * Write this spill record to the location provided.
- */
- public void writeToFile(Path loc, JobConf job)
- throws IOException {
- writeToFile(loc, job, new PureJavaCrc32());
- }
-
- public void writeToFile(Path loc, JobConf job, Checksum crc)
- throws IOException {
- final FileSystem rfs = FileSystem.getLocal(job).getRaw();
- CheckedOutputStream chk = null;
- final FSDataOutputStream out = rfs.create(loc);
- try {
- if (crc != null) {
- crc.reset();
- chk = new CheckedOutputStream(out, crc);
- chk.write(buf.array());
- out.writeLong(chk.getChecksum().getValue());
- } else {
- out.write(buf.array());
- }
- } finally {
- if (chk != null) {
- chk.close();
- } else {
- out.close();
- }
- }
- }
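The read and write paths above agree on one simple framing: the raw index payload, followed by a single long holding its checksum, written outside the checked stream. A hedged round-trip sketch of that framing, using java.util.zip.CRC32 in place of Hadoop's PureJavaCrc32 (both implement Checksum) and in-memory streams in place of the local filesystem:

import java.io.*;
import java.util.zip.*;

public class ChecksumFraming {
  public static void main(String[] args) throws IOException {
    byte[] payload = new byte[]{1, 2, 3, 4};

    // Write: payload through a CheckedOutputStream, then the CRC value.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    Checksum crc = new CRC32();
    CheckedOutputStream chk = new CheckedOutputStream(out, crc);
    chk.write(payload);
    out.writeLong(chk.getChecksum().getValue()); // trailer bypasses the checksum

    // Read: payload through a CheckedInputStream, then compare checksums.
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    crc.reset();
    CheckedInputStream chkIn = new CheckedInputStream(in, crc);
    byte[] back = new byte[payload.length];
    new DataInputStream(chkIn).readFully(back);
    boolean ok = chkIn.getChecksum().getValue() == in.readLong();
    System.out.println("checksum ok: " + ok); // true
  }
}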
-
-}
-
-class IndexRecord {
- long startOffset;
- long rawLength;
- long partLength;
-
- public IndexRecord() { }
-
- public IndexRecord(long startOffset, long rawLength, long partLength) {
- this.startOffset = startOffset;
- this.rawLength = rawLength;
- this.partLength = partLength;
- }
-}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Task.java b/mapreduce/src/java/org/apache/hadoop/mapred/Task.java
deleted file mode 100644
index 449ac07..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/Task.java
+++ /dev/null
@@ -1,1551 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.lang.management.GarbageCollectorMXBean;
-import java.lang.management.ManagementFactory;
-import java.text.NumberFormat;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import javax.crypto.SecretKey;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalDirAllocator;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem.Statistics;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.RawComparator;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.serializer.Deserializer;
-import org.apache.hadoop.io.serializer.SerializationFactory;
-import org.apache.hadoop.mapred.IFile.Writer;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.hadoop.mapreduce.FileSystemCounter;
-import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.TaskCounter;
-import org.apache.hadoop.mapreduce.JobStatus;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
-import org.apache.hadoop.mapreduce.task.ReduceContextImpl;
-import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
-import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin.*;
-import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.util.Progress;
-import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * Base class for tasks.
- */
-@InterfaceAudience.LimitedPrivate({"MapReduce"})
-@InterfaceStability.Unstable
-abstract public class Task implements Writable, Configurable {
- private static final Log LOG =
- LogFactory.getLog(Task.class);
-
- public static String MERGED_OUTPUT_PREFIX = ".merged";
- public static final long DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS = 10000;
-
- /**
- * Counters to measure the usage of the different file systems.
- * Always returns a String array with two elements: the first is the name of the
- * BYTES_READ counter and the second is the name of the BYTES_WRITTEN counter.
- */
- protected static String[] getFileSystemCounterNames(String uriScheme) {
- String scheme = uriScheme.toUpperCase();
- return new String[]{scheme+"_BYTES_READ", scheme+"_BYTES_WRITTEN"};
- }
-
- /**
- * Name of the FileSystem counters' group
- */
- protected static final String FILESYSTEM_COUNTER_GROUP = "FileSystemCounters";
-
- ///////////////////////////////////////////////////////////
- // Helper methods to construct task-output paths
- ///////////////////////////////////////////////////////////
-
- /** Construct output file names so that, when an output directory listing is
- * sorted lexicographically, positions correspond to output partitions.*/
- private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
- static {
- NUMBER_FORMAT.setMinimumIntegerDigits(5);
- NUMBER_FORMAT.setGroupingUsed(false);
- }
-
- static synchronized String getOutputName(int partition) {
- return "part-" + NUMBER_FORMAT.format(partition);
- }
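A quick, hedged check of the formatter configured above: with five minimum integer digits and grouping disabled, partition 7 maps to part-00007, so a lexicographic directory listing lines the part files up in partition order.

import java.text.NumberFormat;

public class OutputNameSketch {
  public static void main(String[] args) {
    NumberFormat fmt = NumberFormat.getInstance();
    fmt.setMinimumIntegerDigits(5);
    fmt.setGroupingUsed(false);
    System.out.println("part-" + fmt.format(7)); // part-00007
  }
}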
-
- ////////////////////////////////////////////
- // Fields
- ////////////////////////////////////////////
-
- private String jobFile; // job configuration file
- private String user; // user running the job
- private TaskAttemptID taskId; // unique, includes job id
- private int partition; // id within job
- TaskStatus taskStatus; // current status of the task
- protected JobStatus.State jobRunStateForCleanup;
- protected boolean jobCleanup = false;
- protected boolean jobSetup = false;
- protected boolean taskCleanup = false;
-
- // An opaque data field used to attach extra data to each task. This is used
- // by the Hadoop scheduler for Mesos to associate a Mesos task ID with each
- // task and recover these IDs on the TaskTracker.
- protected BytesWritable extraData = new BytesWritable();
-
- //skip ranges based on failed ranges from previous attempts
- private SortedRanges skipRanges = new SortedRanges();
- private boolean skipping = false;
- private boolean writeSkipRecs = true;
-
- //currently processing record start index
- private volatile long currentRecStartIndex;
- private Iterator<Long> currentRecIndexIterator =
- skipRanges.skipRangeIterator();
-
- private ResourceCalculatorPlugin resourceCalculator = null;
- private long initCpuCumulativeTime = 0;
-
- protected JobConf conf;
- protected MapOutputFile mapOutputFile;
- protected LocalDirAllocator lDirAlloc;
- private final static int MAX_RETRIES = 10;
- protected JobContext jobContext;
- protected TaskAttemptContext taskContext;
- protected org.apache.hadoop.mapreduce.OutputFormat<?,?> outputFormat;
- protected org.apache.hadoop.mapreduce.OutputCommitter committer;
- protected final Counters.Counter spilledRecordsCounter;
- protected final Counters.Counter failedShuffleCounter;
- protected final Counters.Counter mergedMapOutputsCounter;
- private int numSlotsRequired;
- protected TaskUmbilicalProtocol umbilical;
- protected SecretKey tokenSecret;
- protected GcTimeUpdater gcUpdater;
-
- ////////////////////////////////////////////
- // Constructors
- ////////////////////////////////////////////
-
- public Task() {
- taskStatus = TaskStatus.createTaskStatus(isMapTask());
- taskId = new TaskAttemptID();
- spilledRecordsCounter =
- counters.findCounter(TaskCounter.SPILLED_RECORDS);
- failedShuffleCounter =
- counters.findCounter(TaskCounter.FAILED_SHUFFLE);
- mergedMapOutputsCounter =
- counters.findCounter(TaskCounter.MERGED_MAP_OUTPUTS);
- gcUpdater = new GcTimeUpdater();
- }
-
- public Task(String jobFile, TaskAttemptID taskId, int partition,
- int numSlotsRequired) {
- this.jobFile = jobFile;
- this.taskId = taskId;
-
- this.partition = partition;
- this.numSlotsRequired = numSlotsRequired;
- this.taskStatus = TaskStatus.createTaskStatus(isMapTask(), this.taskId,
- 0.0f, numSlotsRequired,
- TaskStatus.State.UNASSIGNED,
- "", "", "",
- isMapTask() ?
- TaskStatus.Phase.MAP :
- TaskStatus.Phase.SHUFFLE,
- counters);
- spilledRecordsCounter = counters.findCounter(TaskCounter.SPILLED_RECORDS);
- failedShuffleCounter = counters.findCounter(TaskCounter.FAILED_SHUFFLE);
- mergedMapOutputsCounter =
- counters.findCounter(TaskCounter.MERGED_MAP_OUTPUTS);
- gcUpdater = new GcTimeUpdater();
- }
-
- ////////////////////////////////////////////
- // Accessors
- ////////////////////////////////////////////
- public void setJobFile(String jobFile) { this.jobFile = jobFile; }
- public String getJobFile() { return jobFile; }
- public TaskAttemptID getTaskID() { return taskId; }
- public int getNumSlotsRequired() {
- return numSlotsRequired;
- }
-
- Counters getCounters() { return counters; }
-
- /**
- * Get the job id for this task.
- * @return the job id
- */
- public JobID getJobID() {
- return taskId.getJobID();
- }
-
- /**
- * Set the job token secret
- * @param tokenSecret the secret
- */
- public void setJobTokenSecret(SecretKey tokenSecret) {
- this.tokenSecret = tokenSecret;
- }
-
- /**
- * Get the job token secret
- * @return the token secret
- */
- public SecretKey getJobTokenSecret() {
- return this.tokenSecret;
- }
-
-
- /**
- * Get the index of this task within the job.
- * @return the integer part of the task id
- */
- public int getPartition() {
- return partition;
- }
- /**
- * Return the current phase of the task.
- * Needs to be synchronized, as the communication thread sends the phase every second.
- * @return the current phase of the task
- */
- public synchronized TaskStatus.Phase getPhase(){
- return this.taskStatus.getPhase();
- }
- /**
- * Set current phase of the task.
- * @param phase task phase
- */
- protected synchronized void setPhase(TaskStatus.Phase phase){
- this.taskStatus.setPhase(phase);
- }
-
- /**
- * Get whether to write skip records.
- */
- protected boolean toWriteSkipRecs() {
- return writeSkipRecs;
- }
-
- /**
- * Set whether to write skip records.
- */
- protected void setWriteSkipRecs(boolean writeSkipRecs) {
- this.writeSkipRecs = writeSkipRecs;
- }
-
- /**
- * Report a fatal error to the parent (task) tracker.
- */
- protected void reportFatalError(TaskAttemptID id, Throwable throwable,
- String logMsg) {
- LOG.fatal(logMsg);
- Throwable tCause = throwable.getCause();
- String cause = tCause == null
- ? StringUtils.stringifyException(throwable)
- : StringUtils.stringifyException(tCause);
- try {
- umbilical.fatalError(id, cause);
- } catch (IOException ioe) {
- LOG.fatal("Failed to contact the tasktracker", ioe);
- System.exit(-1);
- }
- }
-
- /**
- * Gets a handle to the Statistics instance based on the scheme associated
- * with path.
- *
- * @param path the path.
- * @param conf the configuration to extract the scheme from if not part of
- * the path.
- * @return a Statistics instance, or null if none is found for the scheme.
- */
- protected static Statistics getFsStatistics(Path path, Configuration conf) throws IOException {
- Statistics matchedStats = null;
- path = path.getFileSystem(conf).makeQualified(path);
- String scheme = path.toUri().getScheme();
- for (Statistics stats : FileSystem.getAllStatistics()) {
- if (stats.getScheme().equals(scheme)) {
- matchedStats = stats;
- break;
- }
- }
- return matchedStats;
- }
-
- /**
- * Get skipRanges.
- */
- public SortedRanges getSkipRanges() {
- return skipRanges;
- }
-
- /**
- * Set skipRanges.
- */
- public void setSkipRanges(SortedRanges skipRanges) {
- this.skipRanges = skipRanges;
- }
-
- /**
- * Is Task in skipping mode.
- */
- public boolean isSkipping() {
- return skipping;
- }
-
- /**
- * Sets whether to run Task in skipping mode.
- * @param skipping
- */
- public void setSkipping(boolean skipping) {
- this.skipping = skipping;
- }
-
- /**
- * Return the current state of the task.
- * Needs to be synchronized, as the communication thread
- * sends the state every second.
- * @return the current run state of the task
- */
- synchronized TaskStatus.State getState(){
- return this.taskStatus.getRunState();
- }
- /**
- * Set current state of the task.
- * @param state
- */
- synchronized void setState(TaskStatus.State state){
- this.taskStatus.setRunState(state);
- }
-
- void setTaskCleanupTask() {
- taskCleanup = true;
- }
-
- boolean isTaskCleanupTask() {
- return taskCleanup;
- }
-
- boolean isJobCleanupTask() {
- return jobCleanup;
- }
-
- boolean isJobAbortTask() {
- // the task is an abort task if it's marked for cleanup and the final
- // expected state is either failed or killed.
- return isJobCleanupTask()
- && (jobRunStateForCleanup == JobStatus.State.KILLED
- || jobRunStateForCleanup == JobStatus.State.FAILED);
- }
-
- boolean isJobSetupTask() {
- return jobSetup;
- }
-
- void setJobSetupTask() {
- jobSetup = true;
- }
-
- void setJobCleanupTask() {
- jobCleanup = true;
- }
-
- /**
- * Sets the task to do job abort in the cleanup.
- * @param status the final runstate of the job.
- */
- void setJobCleanupTaskState(JobStatus.State status) {
- jobRunStateForCleanup = status;
- }
-
- boolean isMapOrReduce() {
- return !jobSetup && !jobCleanup && !taskCleanup;
- }
-
- /**
- * Get the name of the user running the job/task. The TaskTracker needs the task's
- * user name even before its JobConf is localized. So we explicitly serialize
- * the user name.
- *
- * @return user
- */
- String getUser() {
- return user;
- }
-
- void setUser(String user) {
- this.user = user;
- }
-
- ////////////////////////////////////////////
- // Writable methods
- ////////////////////////////////////////////
-
- public void write(DataOutput out) throws IOException {
- Text.writeString(out, jobFile);
- taskId.write(out);
- out.writeInt(partition);
- out.writeInt(numSlotsRequired);
- taskStatus.write(out);
- skipRanges.write(out);
- out.writeBoolean(skipping);
- out.writeBoolean(jobCleanup);
- if (jobCleanup) {
- WritableUtils.writeEnum(out, jobRunStateForCleanup);
- }
- out.writeBoolean(jobSetup);
- out.writeBoolean(writeSkipRecs);
- out.writeBoolean(taskCleanup);
- Text.writeString(out, user);
- extraData.write(out);
- }
-
- public void readFields(DataInput in) throws IOException {
- jobFile = Text.readString(in);
- taskId = TaskAttemptID.read(in);
- partition = in.readInt();
- numSlotsRequired = in.readInt();
- taskStatus.readFields(in);
- skipRanges.readFields(in);
- currentRecIndexIterator = skipRanges.skipRangeIterator();
- currentRecStartIndex = currentRecIndexIterator.next();
- skipping = in.readBoolean();
- jobCleanup = in.readBoolean();
- if (jobCleanup) {
- jobRunStateForCleanup =
- WritableUtils.readEnum(in, JobStatus.State.class);
- }
- jobSetup = in.readBoolean();
- writeSkipRecs = in.readBoolean();
- taskCleanup = in.readBoolean();
- if (taskCleanup) {
- setPhase(TaskStatus.Phase.CLEANUP);
- }
- user = Text.readString(in);
- extraData.readFields(in);
- }
-
- @Override
- public String toString() { return taskId.toString(); }
-
- /**
- * Localize the given JobConf to be specific for this task.
- */
- public void localizeConfiguration(JobConf conf) throws IOException {
- conf.set(JobContext.TASK_ID, taskId.getTaskID().toString());
- conf.set(JobContext.TASK_ATTEMPT_ID, taskId.toString());
- conf.setBoolean(JobContext.TASK_ISMAP, isMapTask());
- conf.setInt(JobContext.TASK_PARTITION, partition);
- conf.set(JobContext.ID, taskId.getJobID().toString());
- }
-
- /** Run this task as a part of the named job. This method is executed in the
- * child process and is what invokes user-supplied map, reduce, etc. methods.
- * @param umbilical for progress reports
- */
- public abstract void run(JobConf job, TaskUmbilicalProtocol umbilical)
- throws IOException, ClassNotFoundException, InterruptedException;
-
-
- /** Return an appropriate thread runner for this task.
- * @param tip TODO*/
- public abstract TaskRunner createRunner(TaskTracker tracker,
- TaskTracker.TaskInProgress tip) throws IOException;
-
- /** The number of milliseconds between progress reports. */
- public static final int PROGRESS_INTERVAL = 3000;
-
- private transient Progress taskProgress = new Progress();
-
- // Current counters
- private transient Counters counters = new Counters();
-
- /* flag to track whether task is done */
- private AtomicBoolean taskDone = new AtomicBoolean(false);
-
- public abstract boolean isMapTask();
-
- public Progress getProgress() { return taskProgress; }
-
- public void initialize(JobConf job, JobID id,
- Reporter reporter,
- boolean useNewApi) throws IOException,
- ClassNotFoundException,
- InterruptedException {
- jobContext = new JobContextImpl(job, id, reporter);
- taskContext = new TaskAttemptContextImpl(job, taskId, reporter);
- if (getState() == TaskStatus.State.UNASSIGNED) {
- setState(TaskStatus.State.RUNNING);
- }
- if (useNewApi) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("using new api for output committer");
- }
- outputFormat =
- ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
- committer = outputFormat.getOutputCommitter(taskContext);
- } else {
- committer = conf.getOutputCommitter();
- }
- Path outputPath = FileOutputFormat.getOutputPath(conf);
- if (outputPath != null) {
- if ((committer instanceof FileOutputCommitter)) {
- FileOutputFormat.setWorkOutputPath(conf,
- ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
- } else {
- FileOutputFormat.setWorkOutputPath(conf, outputPath);
- }
- }
- committer.setupTask(taskContext);
- Class<? extends ResourceCalculatorPlugin> clazz =
- conf.getClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
- null, ResourceCalculatorPlugin.class);
- resourceCalculator = ResourceCalculatorPlugin
- .getResourceCalculatorPlugin(clazz, conf);
- LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
- if (resourceCalculator != null) {
- initCpuCumulativeTime =
- resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
- }
- }
-
- @InterfaceAudience.Private
- @InterfaceStability.Unstable
- protected class TaskReporter
- extends org.apache.hadoop.mapreduce.StatusReporter
- implements Runnable, Reporter {
- private TaskUmbilicalProtocol umbilical;
- private InputSplit split = null;
- private Progress taskProgress;
- private Thread pingThread = null;
-
- /**
- * flag that indicates whether progress update needs to be sent to parent.
- * If true, it has been set. If false, it has been reset.
- * Using AtomicBoolean since we need an atomic read & reset method.
- */
- private AtomicBoolean progressFlag = new AtomicBoolean(false);
-
- TaskReporter(Progress taskProgress,
- TaskUmbilicalProtocol umbilical) {
- this.umbilical = umbilical;
- this.taskProgress = taskProgress;
- }
-
- // getters and setters for flag
- void setProgressFlag() {
- progressFlag.set(true);
- }
- boolean resetProgressFlag() {
- return progressFlag.getAndSet(false);
- }
- public void setStatus(String status) {
- taskProgress.setStatus(status);
- // indicate that progress update needs to be sent
- setProgressFlag();
- }
- public void setProgress(float progress) {
- // set current phase progress.
- // This method assumes that task has phases.
- taskProgress.phase().set(progress);
- // indicate that progress update needs to be sent
- setProgressFlag();
- }
-
- public float getProgress() {
- return taskProgress.getProgress();
- };
-
- public void progress() {
- // indicate that progress update needs to be sent
- setProgressFlag();
- }
- public Counters.Counter getCounter(String group, String name) {
- Counters.Counter counter = null;
- if (counters != null) {
- counter = counters.findCounter(group, name);
- }
- return counter;
- }
- public Counters.Counter getCounter(Enum<?> name) {
- return counters == null ? null : counters.findCounter(name);
- }
- public void incrCounter(Enum key, long amount) {
- if (counters != null) {
- counters.incrCounter(key, amount);
- }
- setProgressFlag();
- }
- public void incrCounter(String group, String counter, long amount) {
- if (counters != null) {
- counters.incrCounter(group, counter, amount);
- }
- if(skipping && SkipBadRecords.COUNTER_GROUP.equals(group) && (
- SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS.equals(counter) ||
- SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS.equals(counter))) {
- //if application reports the processed records, move the
- //currentRecStartIndex to the next.
- //currentRecStartIndex is the start index which has not yet been
- //finished and is still in task's stomach.
- for(int i=0;i<amount;i++) {
- currentRecStartIndex = currentRecIndexIterator.next();
- }
- }
- setProgressFlag();
- }
- public void setInputSplit(InputSplit split) {
- this.split = split;
- }
- public InputSplit getInputSplit() throws UnsupportedOperationException {
- if (split == null) {
- throw new UnsupportedOperationException("Input only available on map");
- } else {
- return split;
- }
- }
- /**
- * The communication thread handles communication with the parent (Task Tracker).
- * It sends progress updates if progress has been made or if the task needs to
- * let the parent know that it's alive. It also pings the parent to see if it's alive.
- */
- public void run() {
- final int MAX_RETRIES = 3;
- int remainingRetries = MAX_RETRIES;
- // get current flag value and reset it as well
- boolean sendProgress = resetProgressFlag();
- while (!taskDone.get()) {
- try {
- boolean taskFound = true; // whether TT knows about this task
- // sleep for a bit
- try {
- Thread.sleep(PROGRESS_INTERVAL);
- }
- catch (InterruptedException e) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(getTaskID() + " Progress/ping thread exiting " +
- "since it got interrupted");
- }
- break;
- }
-
- if (sendProgress) {
- // we need to send progress update
- updateCounters();
- taskStatus.statusUpdate(taskProgress.get(),
- taskProgress.toString(),
- counters);
- taskFound = umbilical.statusUpdate(taskId, taskStatus);
- taskStatus.clearStatus();
- }
- else {
- // send ping
- taskFound = umbilical.ping(taskId);
- }
-
- // if Task Tracker is not aware of our task ID (probably because it died and
- // came back up), kill ourselves
- if (!taskFound) {
- LOG.warn("Parent died. Exiting "+taskId);
- System.exit(66);
- }
-
- sendProgress = resetProgressFlag();
- remainingRetries = MAX_RETRIES;
- }
- catch (Throwable t) {
- LOG.info("Communication exception: " + StringUtils.stringifyException(t));
- remainingRetries -=1;
- if (remainingRetries == 0) {
- ReflectionUtils.logThreadInfo(LOG, "Communication exception", 0);
- LOG.warn("Last retry, killing "+taskId);
- System.exit(65);
- }
- }
- }
- }
- public void startCommunicationThread() {
- if (pingThread == null) {
- pingThread = new Thread(this, "communication thread");
- pingThread.setDaemon(true);
- pingThread.start();
- }
- }
- public void stopCommunicationThread() throws InterruptedException {
- if (pingThread != null) {
- pingThread.interrupt();
- pingThread.join();
- }
- }
- }
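The run() method above is the heart of the reporter: a daemon thread that wakes every PROGRESS_INTERVAL milliseconds and either flushes a status update or just pings the parent, exiting once the task flips its done flag or it is interrupted. A stripped-down, hedged sketch of that loop shape, with the umbilical calls replaced by prints (all names here are illustrative and the retry bookkeeping is elided):

import java.util.concurrent.atomic.AtomicBoolean;

public class ReporterLoopSketch implements Runnable {
  static final int PROGRESS_INTERVAL = 3000;
  final AtomicBoolean taskDone = new AtomicBoolean(false);
  final AtomicBoolean progressFlag = new AtomicBoolean(false);

  public void run() {
    while (!taskDone.get()) {
      try {
        Thread.sleep(PROGRESS_INTERVAL);
      } catch (InterruptedException e) {
        break; // stopCommunicationThread() interrupts and joins us
      }
      if (progressFlag.getAndSet(false)) {
        System.out.println("status update"); // would be umbilical.statusUpdate(...)
      } else {
        System.out.println("ping"); // would be umbilical.ping(...)
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ReporterLoopSketch r = new ReporterLoopSketch();
    Thread t = new Thread(r, "communication thread");
    t.setDaemon(true);
    t.start();
    r.progressFlag.set(true); // simulate work making progress
    Thread.sleep(7000);       // let the loop fire a couple of times
    r.taskDone.set(true);
    t.interrupt();
    t.join();
  }
}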
-
- /**
- * Reports the next executing record range to TaskTracker.
- *
- * @param umbilical
- * @param nextRecIndex the record index which would be fed next.
- * @throws IOException
- */
- protected void reportNextRecordRange(final TaskUmbilicalProtocol umbilical,
- long nextRecIndex) throws IOException{
- //currentRecStartIndex is the start index which has not yet been finished
- //and is still in task's stomach.
- long len = nextRecIndex - currentRecStartIndex +1;
- SortedRanges.Range range =
- new SortedRanges.Range(currentRecStartIndex, len);
- taskStatus.setNextRecordRange(range);
- if (LOG.isDebugEnabled()) {
- LOG.debug("sending reportNextRecordRange " + range);
- }
- umbilical.reportNextRecordRange(taskId, range);
- }
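For instance, with currentRecStartIndex = 10 and nextRecIndex = 14, len works out to 14 - 10 + 1 = 5, so the range handed to the TaskTracker is Range(10, 5), covering records 10 through 14 inclusive.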
-
- /**
- * Create a TaskReporter and start communication thread
- */
- TaskReporter startReporter(final TaskUmbilicalProtocol umbilical) {
- // start thread that will handle communication with parent
- TaskReporter reporter = new TaskReporter(getProgress(), umbilical);
- reporter.startCommunicationThread();
- return reporter;
- }
-
- /**
- * Update resource information counters
- */
- void updateResourceCounters() {
- // Update generic resource counters
- updateHeapUsageCounter();
-
- // Updating resources specified in ResourceCalculatorPlugin
- if (resourceCalculator == null) {
- return;
- }
- ProcResourceValues res = resourceCalculator.getProcResourceValues();
- long cpuTime = res.getCumulativeCpuTime();
- long pMem = res.getPhysicalMemorySize();
- long vMem = res.getVirtualMemorySize();
- // Remove the CPU time consumed previously by JVM reuse
- cpuTime -= initCpuCumulativeTime;
- counters.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(cpuTime);
- counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES).setValue(pMem);
- counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES).setValue(vMem);
- }
-
- /**
- * An updater that tracks the amount of time this task has spent in GC.
- */
- class GcTimeUpdater {
- private long lastGcMillis = 0;
- private List<GarbageCollectorMXBean> gcBeans = null;
-
- public GcTimeUpdater() {
- this.gcBeans = ManagementFactory.getGarbageCollectorMXBeans();
- getElapsedGc(); // Initialize 'lastGcMillis' with the current time spent.
- }
-
- /**
- * @return the number of milliseconds that the gc has used for CPU
- * since the last time this method was called.
- */
- protected long getElapsedGc() {
- long thisGcMillis = 0;
- for (GarbageCollectorMXBean gcBean : gcBeans) {
- thisGcMillis += gcBean.getCollectionTime();
- }
-
- long delta = thisGcMillis - lastGcMillis;
- this.lastGcMillis = thisGcMillis;
- return delta;
- }
-
- /**
- * Increment the gc-elapsed-time counter.
- */
- public void incrementGcCounter() {
- if (null == counters) {
- return; // nothing to do.
- }
-
- Counter gcCounter = counters.findCounter(TaskCounter.GC_TIME_MILLIS);
- if (null != gcCounter) {
- gcCounter.increment(getElapsedGc());
- }
- }
- }
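As a worked example of the delta logic above: if the GC beans report a cumulative 250 ms on this call and the previous call recorded 180 ms, getElapsedGc() returns 70, and GC_TIME_MILLIS grows by exactly that amount, so repeated calls never double-count GC time.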
-
- /**
- * An updater that tracks the last number reported for a given file
- * system and only creates the counters when they are needed.
- */
- class FileSystemStatisticUpdater {
- private FileSystem.Statistics stats;
- private Counters.Counter readBytesCounter, writeBytesCounter,
- readOpsCounter, largeReadOpsCounter, writeOpsCounter;
-
- FileSystemStatisticUpdater(FileSystem.Statistics stats) {
- this.stats = stats;
- }
-
- void updateCounters() {
- String scheme = stats.getScheme();
- if (readBytesCounter == null) {
- readBytesCounter = counters.findCounter(scheme,
- FileSystemCounter.BYTES_READ);
- }
- readBytesCounter.setValue(stats.getBytesRead());
- if (writeBytesCounter == null) {
- writeBytesCounter = counters.findCounter(scheme,
- FileSystemCounter.BYTES_WRITTEN);
- }
- writeBytesCounter.setValue(stats.getBytesWritten());
- if (readOpsCounter == null) {
- readOpsCounter = counters.findCounter(scheme,
- FileSystemCounter.READ_OPS);
- }
- readOpsCounter.setValue(stats.getReadOps());
- if (largeReadOpsCounter == null) {
- largeReadOpsCounter = counters.findCounter(scheme,
- FileSystemCounter.LARGE_READ_OPS);
- }
- largeReadOpsCounter.setValue(stats.getLargeReadOps());
- if (writeOpsCounter == null) {
- writeOpsCounter = counters.findCounter(scheme,
- FileSystemCounter.WRITE_OPS);
- }
- writeOpsCounter.setValue(stats.getWriteOps());
- }
- }
-
- /**
- * A map from URI scheme to the FileSystemStatisticUpdater tracking that scheme.
- */
- private Map<String, FileSystemStatisticUpdater> statisticUpdaters =
- new HashMap<String, FileSystemStatisticUpdater>();
-
- private synchronized void updateCounters() {
- for(Statistics stat: FileSystem.getAllStatistics()) {
- String uriScheme = stat.getScheme();
- FileSystemStatisticUpdater updater = statisticUpdaters.get(uriScheme);
- if(updater==null) {//new FileSystem has been found in the cache
- updater = new FileSystemStatisticUpdater(stat);
- statisticUpdaters.put(uriScheme, updater);
- }
- updater.updateCounters();
- }
-
- gcUpdater.incrementGcCounter();
- updateResourceCounters();
- }
-
- /**
- * Updates the {@link TaskCounter#COMMITTED_HEAP_BYTES} counter to reflect the
- * current total committed heap space usage of this JVM.
- */
- @SuppressWarnings("deprecation")
- private void updateHeapUsageCounter() {
- long currentHeapUsage = Runtime.getRuntime().totalMemory();
- counters.findCounter(TaskCounter.COMMITTED_HEAP_BYTES)
- .setValue(currentHeapUsage);
- }
-
- public void done(TaskUmbilicalProtocol umbilical,
- TaskReporter reporter
- ) throws IOException, InterruptedException {
- LOG.info("Task:" + taskId + " is done."
- + " And is in the process of commiting");
- updateCounters();
-
- boolean commitRequired = isCommitRequired();
- if (commitRequired) {
- int retries = MAX_RETRIES;
- setState(TaskStatus.State.COMMIT_PENDING);
- // tell the task tracker that the task is commit pending
- while (true) {
- try {
- umbilical.commitPending(taskId, taskStatus);
- break;
- } catch (InterruptedException ie) {
- // ignore
- } catch (IOException ie) {
- LOG.warn("Failure sending commit pending: " +
- StringUtils.stringifyException(ie));
- if (--retries == 0) {
- System.exit(67);
- }
- }
- }
- //wait for commit approval and commit
- commit(umbilical, reporter, committer);
- }
- taskDone.set(true);
- reporter.stopCommunicationThread();
- // Make sure we send at least one set of counter increments. It's
- // ok to call updateCounters() in this thread after comm thread stopped.
- updateCounters();
- sendLastUpdate(umbilical);
- //signal the tasktracker that we are done
- sendDone(umbilical);
- }
-
- /**
- * Checks if this task has anything to commit, depending on the
- * type of task, as well as on whether the {@link OutputCommitter}
- * has anything to commit.
- *
- * @return true if the task has to commit
- * @throws IOException
- */
- boolean isCommitRequired() throws IOException {
- boolean commitRequired = false;
- if (isMapOrReduce()) {
- commitRequired = committer.needsTaskCommit(taskContext);
- }
- return commitRequired;
- }
-
- /**
- * Send a status update to the task tracker
- * @param umbilical
- * @throws IOException
- */
- public void statusUpdate(TaskUmbilicalProtocol umbilical)
- throws IOException {
- int retries = MAX_RETRIES;
- while (true) {
- try {
- if (!umbilical.statusUpdate(getTaskID(), taskStatus)) {
- LOG.warn("Parent died. Exiting "+taskId);
- System.exit(66);
- }
- taskStatus.clearStatus();
- return;
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt(); // interrupt ourselves
- } catch (IOException ie) {
- LOG.warn("Failure sending status update: " +
- StringUtils.stringifyException(ie));
- if (--retries == 0) {
- throw ie;
- }
- }
- }
- }
-
- /**
- * Sends last status update before sending umbilical.done();
- */
- private void sendLastUpdate(TaskUmbilicalProtocol umbilical)
- throws IOException {
- taskStatus.setOutputSize(calculateOutputSize());
- // send a final status report
- taskStatus.statusUpdate(taskProgress.get(),
- taskProgress.toString(),
- counters);
- statusUpdate(umbilical);
- }
-
- /**
- * Calculates the size of output for this task.
- *
- * @return -1 if it can't be found.
- */
- private long calculateOutputSize() throws IOException {
- if (!isMapOrReduce()) {
- return -1;
- }
-
- if (isMapTask() && conf.getNumReduceTasks() > 0) {
- try {
- Path mapOutput = mapOutputFile.getOutputFile();
- FileSystem localFS = FileSystem.getLocal(conf);
- return localFS.getFileStatus(mapOutput).getLen();
- } catch (IOException e) {
- LOG.warn ("Could not find output size " , e);
- }
- }
- return -1;
- }
-
- private void sendDone(TaskUmbilicalProtocol umbilical) throws IOException {
- int retries = MAX_RETRIES;
- while (true) {
- try {
- umbilical.done(getTaskID());
- LOG.info("Task '" + taskId + "' done.");
- return;
- } catch (IOException ie) {
- LOG.warn("Failure signalling completion: " +
- StringUtils.stringifyException(ie));
- if (--retries == 0) {
- throw ie;
- }
- }
- }
- }
-
- private void commit(TaskUmbilicalProtocol umbilical,
- TaskReporter reporter,
- org.apache.hadoop.mapreduce.OutputCommitter committer
- ) throws IOException {
- int retries = MAX_RETRIES;
- while (true) {
- try {
- while (!umbilical.canCommit(taskId)) {
- try {
- Thread.sleep(1000);
- } catch(InterruptedException ie) {
- //ignore
- }
- reporter.setProgressFlag();
- }
- break;
- } catch (IOException ie) {
- LOG.warn("Failure asking whether task can commit: " +
- StringUtils.stringifyException(ie));
- if (--retries == 0) {
- //if it couldn't query successfully then delete the output
- discardOutput(taskContext);
- System.exit(68);
- }
- }
- }
-
- // task can Commit now
- try {
- LOG.info("Task " + taskId + " is allowed to commit now");
- committer.commitTask(taskContext);
- return;
- } catch (IOException iee) {
- LOG.warn("Failure committing: " +
- StringUtils.stringifyException(iee));
- //if it couldn't commit successfully then delete the output
- discardOutput(taskContext);
- throw iee;
- }
- }
-
- private
- void discardOutput(TaskAttemptContext taskContext) {
- try {
- committer.abortTask(taskContext);
- } catch (IOException ioe) {
- LOG.warn("Failure cleaning up: " +
- StringUtils.stringifyException(ioe));
- }
- }
-
- protected void runTaskCleanupTask(TaskUmbilicalProtocol umbilical,
- TaskReporter reporter)
- throws IOException, InterruptedException {
- taskCleanup(umbilical);
- done(umbilical, reporter);
- }
-
- void taskCleanup(TaskUmbilicalProtocol umbilical)
- throws IOException {
- // set phase for this task
- setPhase(TaskStatus.Phase.CLEANUP);
- getProgress().setStatus("cleanup");
- statusUpdate(umbilical);
- LOG.info("Runnning cleanup for the task");
- // do the cleanup
- committer.abortTask(taskContext);
- }
-
- protected void runJobCleanupTask(TaskUmbilicalProtocol umbilical,
- TaskReporter reporter
- ) throws IOException, InterruptedException {
- // set phase for this task
- setPhase(TaskStatus.Phase.CLEANUP);
- getProgress().setStatus("cleanup");
- statusUpdate(umbilical);
- // do the cleanup
- LOG.info("Cleaning up job");
- if (jobRunStateForCleanup == JobStatus.State.FAILED
- || jobRunStateForCleanup == JobStatus.State.KILLED) {
- LOG.info("Aborting job with runstate : " + jobRunStateForCleanup.name());
- if (conf.getUseNewMapper()) {
- committer.abortJob(jobContext, jobRunStateForCleanup);
- } else {
- org.apache.hadoop.mapred.OutputCommitter oldCommitter =
- (org.apache.hadoop.mapred.OutputCommitter)committer;
- oldCommitter.abortJob(jobContext, jobRunStateForCleanup);
- }
- } else if (jobRunStateForCleanup == JobStatus.State.SUCCEEDED){
- LOG.info("Committing job");
- committer.commitJob(jobContext);
- } else {
- throw new IOException("Invalid state of the job for cleanup. State found "
- + jobRunStateForCleanup + " expecting "
- + JobStatus.State.SUCCEEDED + ", "
- + JobStatus.State.FAILED + " or "
- + JobStatus.State.KILLED);
- }
-
- // delete the staging area for the job
- JobConf conf = new JobConf(jobContext.getConfiguration());
- if (!keepTaskFiles(conf)) {
- String jobTempDir = conf.get("mapreduce.job.dir");
- Path jobTempDirPath = new Path(jobTempDir);
- FileSystem fs = jobTempDirPath.getFileSystem(conf);
- fs.delete(jobTempDirPath, true);
- }
- done(umbilical, reporter);
- }
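-
-  // Job-cleanup dispatch, summarizing the branches above:
-  //   FAILED / KILLED -> committer.abortJob(jobContext, state)
-  //   SUCCEEDED       -> committer.commitJob(jobContext)
-  //   anything else   -> IOException (invalid state for cleanup)
-  // followed by deletion of the job's staging dir unless task files are kept.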
-
- protected boolean keepTaskFiles(JobConf conf) {
- return (conf.getKeepTaskFilesPattern() != null || conf
- .getKeepFailedTaskFiles());
- }
-
- protected void runJobSetupTask(TaskUmbilicalProtocol umbilical,
- TaskReporter reporter
- ) throws IOException, InterruptedException {
- // do the setup
- getProgress().setStatus("setup");
- committer.setupJob(jobContext);
- done(umbilical, reporter);
- }
-
- public void setConf(Configuration conf) {
- if (conf instanceof JobConf) {
- this.conf = (JobConf) conf;
- } else {
- this.conf = new JobConf(conf);
- }
- this.mapOutputFile = ReflectionUtils.newInstance(
- conf.getClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
- MROutputFiles.class, MapOutputFile.class), conf);
- this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
-    // add the static resolutions (this is required for junit tests that
-    // simulate multiple nodes on a single physical node).
- String hostToResolved[] = conf.getStrings(TTConfig.TT_STATIC_RESOLUTIONS);
- if (hostToResolved != null) {
- for (String str : hostToResolved) {
- String name = str.substring(0, str.indexOf('='));
- String resolvedName = str.substring(str.indexOf('=') + 1);
- NetUtils.addStaticResolution(name, resolvedName);
- }
- }
- }
-
- public Configuration getConf() {
- return this.conf;
- }
-
- /**
- * OutputCollector for the combiner.
- */
- @InterfaceAudience.Private
- @InterfaceStability.Unstable
- public static class CombineOutputCollector<K extends Object, V extends Object>
- implements OutputCollector<K, V> {
- private Writer<K, V> writer;
- private Counters.Counter outCounter;
- private Progressable progressable;
- private long progressBar;
-
- public CombineOutputCollector(Counters.Counter outCounter, Progressable progressable, Configuration conf) {
- this.outCounter = outCounter;
- this.progressable=progressable;
- progressBar = conf.getLong(MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS, DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS);
- }
-
- public synchronized void setWriter(Writer<K, V> writer) {
- this.writer = writer;
- }
-
- public synchronized void collect(K key, V value)
- throws IOException {
- outCounter.increment(1);
- writer.append(key, value);
- if ((outCounter.getValue() % progressBar) == 0) {
- progressable.progress();
- }
- }
- }
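-
-  // A minimal usage sketch (illustrative only; assumes a combine-output
-  // counter, a reporter and an IFile writer are already in scope):
-  //
-  //   CombineOutputCollector<K, V> combineCollector =
-  //       new CombineOutputCollector<K, V>(combineOutputCounter, reporter, job);
-  //   combineCollector.setWriter(writer);   // must be set before collect()
-  //   combinerRunner.combine(kvIter, combineCollector);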
-
- /** Iterates values while keys match in sorted input. */
- static class ValuesIterator<KEY,VALUE> implements Iterator<VALUE> {
- protected RawKeyValueIterator in; //input iterator
- private KEY key; // current key
- private KEY nextKey;
- private VALUE value; // current value
- private boolean hasNext; // more w/ this key
- private boolean more; // more in file
- private RawComparator<KEY> comparator;
- protected Progressable reporter;
- private Deserializer<KEY> keyDeserializer;
- private Deserializer<VALUE> valDeserializer;
- private DataInputBuffer keyIn = new DataInputBuffer();
- private DataInputBuffer valueIn = new DataInputBuffer();
-
- public ValuesIterator (RawKeyValueIterator in,
- RawComparator<KEY> comparator,
- Class<KEY> keyClass,
- Class<VALUE> valClass, Configuration conf,
- Progressable reporter)
- throws IOException {
- this.in = in;
- this.comparator = comparator;
- this.reporter = reporter;
- SerializationFactory serializationFactory = new SerializationFactory(conf);
- this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
- this.keyDeserializer.open(keyIn);
- this.valDeserializer = serializationFactory.getDeserializer(valClass);
- this.valDeserializer.open(this.valueIn);
- readNextKey();
- key = nextKey;
- nextKey = null; // force new instance creation
- hasNext = more;
- }
-
- RawKeyValueIterator getRawIterator() { return in; }
-
- /// Iterator methods
-
- public boolean hasNext() { return hasNext; }
-
- private int ctr = 0;
- public VALUE next() {
- if (!hasNext) {
- throw new NoSuchElementException("iterate past last value");
- }
- try {
- readNextValue();
- readNextKey();
- } catch (IOException ie) {
- throw new RuntimeException("problem advancing post rec#"+ctr, ie);
- }
- reporter.progress();
- return value;
- }
-
- public void remove() { throw new RuntimeException("not implemented"); }
-
- /// Auxiliary methods
-
- /** Start processing next unique key. */
- public void nextKey() throws IOException {
- // read until we find a new key
- while (hasNext) {
- readNextKey();
- }
- ++ctr;
-
- // move the next key to the current one
- KEY tmpKey = key;
- key = nextKey;
- nextKey = tmpKey;
- hasNext = more;
- }
-
- /** True iff more keys remain. */
- public boolean more() {
- return more;
- }
-
- /** The current key. */
- public KEY getKey() {
- return key;
- }
-
- /**
-     * Read the next key.
- */
- private void readNextKey() throws IOException {
- more = in.next();
- if (more) {
- DataInputBuffer nextKeyBytes = in.getKey();
- keyIn.reset(nextKeyBytes.getData(), nextKeyBytes.getPosition(), nextKeyBytes.getLength());
- nextKey = keyDeserializer.deserialize(nextKey);
- hasNext = key != null && (comparator.compare(key, nextKey) == 0);
- } else {
- hasNext = false;
- }
- }
-
- /**
- * Read the next value
- * @throws IOException
- */
- private void readNextValue() throws IOException {
- DataInputBuffer nextValueBytes = in.getValue();
- valueIn.reset(nextValueBytes.getData(), nextValueBytes.getPosition(), nextValueBytes.getLength());
- value = valDeserializer.deserialize(value);
- }
- }
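-
-  // Typical iteration pattern (illustrative sketch): the iterator groups
-  // consecutive records whose keys compare equal under the comparator.
-  //
-  //   while (values.more()) {
-  //     KEY k = values.getKey();
-  //     while (values.hasNext()) {
-  //       process(k, values.next());   // process() is hypothetical
-  //     }
-  //     values.nextKey();              // advance to the next unique key
-  //   }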
-
- /** Iterator to return Combined values */
- @InterfaceAudience.Private
- @InterfaceStability.Unstable
- public static class CombineValuesIterator<KEY,VALUE>
- extends ValuesIterator<KEY,VALUE> {
-
- private final Counters.Counter combineInputCounter;
-
- public CombineValuesIterator(RawKeyValueIterator in,
- RawComparator<KEY> comparator, Class<KEY> keyClass,
- Class<VALUE> valClass, Configuration conf, Reporter reporter,
- Counters.Counter combineInputCounter) throws IOException {
- super(in, comparator, keyClass, valClass, conf, reporter);
- this.combineInputCounter = combineInputCounter;
- }
-
- public VALUE next() {
- combineInputCounter.increment(1);
- return super.next();
- }
- }
-
- @SuppressWarnings("unchecked")
- protected static <INKEY,INVALUE,OUTKEY,OUTVALUE>
- org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
- createReduceContext(org.apache.hadoop.mapreduce.Reducer
- <INKEY,INVALUE,OUTKEY,OUTVALUE> reducer,
- Configuration job,
- org.apache.hadoop.mapreduce.TaskAttemptID taskId,
- RawKeyValueIterator rIter,
- org.apache.hadoop.mapreduce.Counter inputKeyCounter,
- org.apache.hadoop.mapreduce.Counter inputValueCounter,
- org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output,
- org.apache.hadoop.mapreduce.OutputCommitter committer,
- org.apache.hadoop.mapreduce.StatusReporter reporter,
- RawComparator<INKEY> comparator,
- Class<INKEY> keyClass, Class<INVALUE> valueClass
- ) throws IOException, InterruptedException {
- org.apache.hadoop.mapreduce.ReduceContext<INKEY, INVALUE, OUTKEY, OUTVALUE>
- reduceContext =
- new ReduceContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(job, taskId,
- rIter,
- inputKeyCounter,
- inputValueCounter,
- output,
- committer,
- reporter,
- comparator,
- keyClass,
- valueClass);
-
- org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
- reducerContext =
- new WrappedReducer<INKEY, INVALUE, OUTKEY, OUTVALUE>().getReducerContext(
- reduceContext);
-
- return reducerContext;
- }
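-
-  // createReduceContext() wraps a ReduceContextImpl in a WrappedReducer so
-  // that callers (reducers and combiners alike) program against the public
-  // Reducer.Context API rather than the implementation class.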
-
- @InterfaceAudience.Private
- @InterfaceStability.Unstable
- protected static abstract class CombinerRunner<K,V> {
- protected final Counters.Counter inputCounter;
- protected final JobConf job;
- protected final TaskReporter reporter;
-
- CombinerRunner(Counters.Counter inputCounter,
- JobConf job,
- TaskReporter reporter) {
- this.inputCounter = inputCounter;
- this.job = job;
- this.reporter = reporter;
- }
-
- /**
- * Run the combiner over a set of inputs.
- * @param iterator the key/value pairs to use as input
- * @param collector the output collector
- */
- abstract void combine(RawKeyValueIterator iterator,
- OutputCollector<K,V> collector
- ) throws IOException, InterruptedException,
- ClassNotFoundException;
-
- @SuppressWarnings("unchecked")
- static <K,V>
- CombinerRunner<K,V> create(JobConf job,
- TaskAttemptID taskId,
- Counters.Counter inputCounter,
- TaskReporter reporter,
- org.apache.hadoop.mapreduce.OutputCommitter committer
- ) throws ClassNotFoundException {
- Class<? extends Reducer<K,V,K,V>> cls =
- (Class<? extends Reducer<K,V,K,V>>) job.getCombinerClass();
-
- if (cls != null) {
- return new OldCombinerRunner(cls, job, inputCounter, reporter);
- }
- // make a task context so we can get the classes
- org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
- new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job, taskId,
- reporter);
- Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>> newcls =
- (Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>)
- taskContext.getCombinerClass();
- if (newcls != null) {
- return new NewCombinerRunner<K,V>(newcls, job, taskId, taskContext,
- inputCounter, reporter, committer);
- }
-
- return null;
- }
- }
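-
-  // Note on create(): the old-style (mapred) combiner class wins if one is
-  // configured; otherwise the new-style (mapreduce) class is looked up via a
-  // freshly built task context, and null is returned when neither is set.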
-
- @InterfaceAudience.Private
- @InterfaceStability.Unstable
- protected static class OldCombinerRunner<K,V> extends CombinerRunner<K,V> {
- private final Class<? extends Reducer<K,V,K,V>> combinerClass;
- private final Class<K> keyClass;
- private final Class<V> valueClass;
- private final RawComparator<K> comparator;
-
- @SuppressWarnings("unchecked")
- protected OldCombinerRunner(Class<? extends Reducer<K,V,K,V>> cls,
- JobConf conf,
- Counters.Counter inputCounter,
- TaskReporter reporter) {
- super(inputCounter, conf, reporter);
- combinerClass = cls;
- keyClass = (Class<K>) job.getMapOutputKeyClass();
- valueClass = (Class<V>) job.getMapOutputValueClass();
- comparator = (RawComparator<K>) job.getOutputKeyComparator();
- }
-
- @SuppressWarnings("unchecked")
- protected void combine(RawKeyValueIterator kvIter,
- OutputCollector<K,V> combineCollector
- ) throws IOException {
- Reducer<K,V,K,V> combiner =
- ReflectionUtils.newInstance(combinerClass, job);
- try {
- CombineValuesIterator<K,V> values =
- new CombineValuesIterator<K,V>(kvIter, comparator, keyClass,
- valueClass, job, Reporter.NULL,
- inputCounter);
- while (values.more()) {
- combiner.reduce(values.getKey(), values, combineCollector,
- Reporter.NULL);
- values.nextKey();
- }
- } finally {
- combiner.close();
- }
- }
- }
-
- @InterfaceAudience.Private
- @InterfaceStability.Unstable
- protected static class NewCombinerRunner<K, V> extends CombinerRunner<K,V> {
- private final Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>
- reducerClass;
- private final org.apache.hadoop.mapreduce.TaskAttemptID taskId;
- private final RawComparator<K> comparator;
- private final Class<K> keyClass;
- private final Class<V> valueClass;
- private final org.apache.hadoop.mapreduce.OutputCommitter committer;
-
- @SuppressWarnings("unchecked")
- NewCombinerRunner(Class reducerClass,
- JobConf job,
- org.apache.hadoop.mapreduce.TaskAttemptID taskId,
- org.apache.hadoop.mapreduce.TaskAttemptContext context,
- Counters.Counter inputCounter,
- TaskReporter reporter,
- org.apache.hadoop.mapreduce.OutputCommitter committer) {
- super(inputCounter, job, reporter);
- this.reducerClass = reducerClass;
- this.taskId = taskId;
- keyClass = (Class<K>) context.getMapOutputKeyClass();
- valueClass = (Class<V>) context.getMapOutputValueClass();
- comparator = (RawComparator<K>) context.getSortComparator();
- this.committer = committer;
- }
-
- private static class OutputConverter<K,V>
- extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
- OutputCollector<K,V> output;
- OutputConverter(OutputCollector<K,V> output) {
- this.output = output;
- }
-
- @Override
- public void close(org.apache.hadoop.mapreduce.TaskAttemptContext context){
- }
-
- @Override
- public void write(K key, V value
- ) throws IOException, InterruptedException {
- output.collect(key,value);
- }
- }
-
- @SuppressWarnings("unchecked")
- @Override
- void combine(RawKeyValueIterator iterator,
- OutputCollector<K,V> collector
- ) throws IOException, InterruptedException,
- ClassNotFoundException {
- // make a reducer
- org.apache.hadoop.mapreduce.Reducer<K,V,K,V> reducer =
- (org.apache.hadoop.mapreduce.Reducer<K,V,K,V>)
- ReflectionUtils.newInstance(reducerClass, job);
- org.apache.hadoop.mapreduce.Reducer.Context
- reducerContext = createReduceContext(reducer, job, taskId,
- iterator, null, inputCounter,
- new OutputConverter(collector),
- committer,
- reporter, comparator, keyClass,
- valueClass);
- reducer.run(reducerContext);
- }
- }
-
- BytesWritable getExtraData() {
- return extraData;
- }
-
- void setExtraData(BytesWritable extraData) {
- this.extraData = extraData;
- }
-}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskScheduler.java b/mapreduce/src/java/org/apache/hadoop/mapred/TaskScheduler.java
deleted file mode 100644
index 5bef8fc..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/TaskScheduler.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
-
-/**
- * Used by a {@link JobTracker} to schedule {@link Task}s on
- * {@link TaskTracker}s.
- * <p>
- * {@link TaskScheduler}s typically use one or more
- * {@link JobInProgressListener}s to receive notifications about jobs.
- * <p>
- * It is the responsibility of the {@link TaskScheduler}
- * to initialize tasks for a job, by calling {@link JobInProgress#initTasks()}
- * between the job being added (when
- * {@link JobInProgressListener#jobAdded(JobInProgress)} is called)
- * and tasks for that job being assigned (by
- * {@link #assignTasks(TaskTracker)}).
- * @see EagerTaskInitializationListener
- */
-abstract class TaskScheduler implements Configurable {
-
- protected Configuration conf;
- protected TaskTrackerManager taskTrackerManager;
-
- public Configuration getConf() {
- return conf;
- }
-
- public void setConf(Configuration conf) {
- this.conf = conf;
- }
-
- public synchronized void setTaskTrackerManager(
- TaskTrackerManager taskTrackerManager) {
- this.taskTrackerManager = taskTrackerManager;
- }
-
- /**
- * Lifecycle method to allow the scheduler to start any work in separate
- * threads.
- * @throws IOException
- */
- public void start() throws IOException {
- // do nothing
- }
-
- /**
- * Lifecycle method to allow the scheduler to stop any work it is doing.
- * @throws IOException
- */
- public void terminate() throws IOException {
- // do nothing
- }
-
- /**
- * Returns the tasks we'd like the TaskTracker to execute right now.
- *
- * @param taskTracker The TaskTracker for which we're looking for tasks.
- * @return A list of tasks to run on that TaskTracker, possibly empty.
- */
- public abstract List<Task> assignTasks(TaskTracker taskTracker)
- throws IOException;
-
- /**
- * Returns a collection of jobs in an order which is specific to
- * the particular scheduler.
- * @param queueName
- * @return
- */
- public abstract Collection<JobInProgress> getJobs(String queueName);
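-
-  // A minimal concrete scheduler would look roughly like this (hypothetical
-  // sketch, not one of the shipped schedulers):
-  //
-  //   class NullScheduler extends TaskScheduler {
-  //     public List<Task> assignTasks(TaskTracker taskTracker) {
-  //       return Collections.emptyList();   // never hands out work
-  //     }
-  //     public Collection<JobInProgress> getJobs(String queueName) {
-  //       return Collections.emptyList();
-  //     }
-  //   }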
-
- /**
-   * Abstract QueueRefresher class. Schedulers can extend this and return an
- * instance of this in the {@link #getQueueRefresher()} method. The
- * {@link #refreshQueues(List)} method of this instance will be invoked by the
- * {@link QueueManager} whenever it gets a request from an administrator to
- * refresh its own queue-configuration. This method has a documented contract
- * between the {@link QueueManager} and the {@link TaskScheduler}.
- *
- * Before calling QueueRefresher, the caller must hold the lock to the
- * corresponding {@link TaskScheduler} (generally in the {@link JobTracker}).
- */
- abstract class QueueRefresher {
-
- /**
- * Refresh the queue-configuration in the scheduler. This method has the
- * following contract.
- * <ol>
-     * <li>Before this method is called, {@link QueueManager} validates the
-     * new queue-configuration. For example, addition of new queues and
-     * removal of queues at any level of the hierarchy are currently not
-     * supported by {@link QueueManager} and so are not supported for
-     * schedulers either.</li>
- * <li>Schedulers will be passed a list of {@link JobQueueInfo}s of the root
- * queues i.e. the queues at the top level. All the descendants are properly
- * linked from these top-level queues.</li>
- * <li>Schedulers should use the scheduler specific queue properties from
- * the newRootQueues, validate the properties themselves and apply them
- * internally.</li>
- * <li>
-     * Once the method returns successfully from the scheduler, the refresh
-     * of queue properties is assumed to be successful throughout and is
-     * 'committed' internally to {@link QueueManager} too. It is guaranteed
-     * that the queue refresh in {@link QueueManager} never fails after a
-     * successful return from the scheduler. If such an abnormality ever
-     * happens, the queue framework will be inconsistent and will need a JT
-     * restart.</li>
-     * <li>If the scheduler throws an exception during {@link #refreshQueues(List)},
- * {@link QueueManager} throws away the newly read configuration, retains
- * the old (consistent) configuration and informs the request issuer about
- * the error appropriately.</li>
- * </ol>
- *
-     * @param newRootQueues the root-level {@link JobQueueInfo}s of the new
-     *          queue-configuration
- */
- abstract void refreshQueues(List<JobQueueInfo> newRootQueues)
- throws Throwable;
- }
-
- /**
-   * Get the {@link QueueRefresher} for this scheduler. By default, a
-   * scheduler has no {@link QueueRefresher}, and null is returned.
- * Schedulers need to return an instance of {@link QueueRefresher} if they
- * wish to refresh their queue-configuration when {@link QueueManager}
- * refreshes its own queue-configuration via an administrator request.
- *
-   * @return the {@link QueueRefresher} for this scheduler, or null if none
- */
- QueueRefresher getQueueRefresher() {
- return null;
- }
-}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/TaskTracker.java b/mapreduce/src/java/org/apache/hadoop/mapred/TaskTracker.java
deleted file mode 100644
index 20e63fa..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapred/TaskTracker.java
+++ /dev/null
@@ -1,4225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.StringTokenizer;
-import java.util.TreeMap;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.regex.Pattern;
-
-import javax.crypto.SecretKey;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.DF;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.LocalDirAllocator;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.http.HttpServer;
-import org.apache.hadoop.io.SecureIOUtils;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.mapred.TaskController.DebugScriptContext;
-import org.apache.hadoop.mapred.TaskController.JobInitializationContext;
-import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext;
-import org.apache.hadoop.mapred.TaskController.TaskControllerPathDeletionContext;
-import org.apache.hadoop.mapred.TaskController.TaskControllerTaskPathDeletionContext;
-import org.apache.hadoop.mapred.TaskController.TaskControllerJobPathDeletionContext;
-import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus;
-import org.apache.hadoop.mapred.pipes.Submitter;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager;
-import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
-import org.apache.hadoop.mapreduce.security.TokenCache;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
-import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
-import org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsException;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.mapreduce.util.ConfigUtil;
-import org.apache.hadoop.mapreduce.util.MemoryCalculatorPlugin;
-import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
-import org.apache.hadoop.mapreduce.util.ProcfsBasedProcessTree;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.RunJar;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.VersionInfo;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.mapreduce.util.MRAsyncDiskService;
-
-/*******************************************************
- * TaskTracker is a process that starts and tracks MR Tasks
- * in a networked environment. It contacts the JobTracker
- * for Task assignments and to report results.
- *
- *******************************************************/
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class TaskTracker
- implements MRConstants, TaskUmbilicalProtocol, Runnable, TTConfig {
- /**
- * @deprecated
- */
- @Deprecated
- static final String MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY =
- "mapred.tasktracker.vmem.reserved";
- /**
- * @deprecated
- */
- @Deprecated
- static final String MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY =
- "mapred.tasktracker.pmem.reserved";
-
-
- static final long WAIT_FOR_DONE = 3 * 1000;
- private int httpPort;
-
- static enum State {NORMAL, STALE, INTERRUPTED, DENIED}
-
- static{
- ConfigUtil.loadResources();
- }
-
- public static final Log LOG =
- LogFactory.getLog(TaskTracker.class);
-
- public static final String MR_CLIENTTRACE_FORMAT =
- "src: %s" + // src IP
- ", dest: %s" + // dst IP
- ", maps: %s" + // number of maps
- ", op: %s" + // operation
- ", reduceID: %s" + // reduce id
- ", duration: %s"; // duration
-
- public static final Log ClientTraceLog =
- LogFactory.getLog(TaskTracker.class.getName() + ".clienttrace");
-
- // Job ACLs file is created by TaskTracker under userlogs/$jobid directory for
- // each job at job localization time. This will be used by TaskLogServlet for
- // authorizing viewing of task logs of that job
- static String jobACLsFile = "job-acls.xml";
-
- volatile boolean running = true;
-
- private LocalDirAllocator localDirAllocator;
- String taskTrackerName;
- String localHostname;
- InetSocketAddress jobTrackAddr;
-
- InetSocketAddress taskReportAddress;
-
- Server taskReportServer = null;
- InterTrackerProtocol jobClient;
-
- private TrackerDistributedCacheManager distributedCacheManager;
-
- // last heartbeat response received
- short heartbeatResponseId = -1;
-
- static final String TASK_CLEANUP_SUFFIX = ".cleanup";
-
- /*
- * This is the last 'status' report sent by this tracker to the JobTracker.
- *
-   * If the RPC call succeeds, this 'status' is cleared out by this tracker,
-   * indicating that a 'fresh' status report should be generated; if the RPC
-   * call fails for whatever reason, the previous status report is sent
-   * again.
- */
- TaskTrackerStatus status = null;
-
- // The system-directory on HDFS where job files are stored
- Path systemDirectory = null;
-
- // The filesystem where job files are stored
- FileSystem systemFS = null;
-
- private final HttpServer server;
-
- volatile boolean shuttingDown = false;
-
- Map<TaskAttemptID, TaskInProgress> tasks = new HashMap<TaskAttemptID, TaskInProgress>();
- /**
- * Map from taskId -> TaskInProgress.
- */
- Map<TaskAttemptID, TaskInProgress> runningTasks = null;
- Map<JobID, RunningJob> runningJobs = new TreeMap<JobID, RunningJob>();
- private final JobTokenSecretManager jobTokenSecretManager
- = new JobTokenSecretManager();
-
- volatile int mapTotal = 0;
- volatile int reduceTotal = 0;
- boolean justStarted = true;
- boolean justInited = true;
- // Mark reduce tasks that are shuffling to rollback their events index
- Set<TaskAttemptID> shouldReset = new HashSet<TaskAttemptID>();
-
- //dir -> DF
- Map<String, DF> localDirsDf = new HashMap<String, DF>();
- long minSpaceStart = 0;
- //must have this much space free to start new tasks
- boolean acceptNewTasks = true;
- long minSpaceKill = 0;
- //if we run under this limit, kill one task
- //and make sure we never receive any new jobs
- //until all the old tasks have been cleaned up.
- //this is if a machine is so full it's only good
- //for serving map output to the other nodes
-
- static Random r = new Random();
- public static final String SUBDIR = "taskTracker";
- static final String DISTCACHEDIR = "distcache";
- static final String JOBCACHE = "jobcache";
- static final String OUTPUT = "output";
- private static final String JARSDIR = "jars";
- static final String LOCAL_SPLIT_FILE = "split.dta";
- static final String LOCAL_SPLIT_META_FILE = "split.info";
- static final String JOBFILE = "job.xml";
- static final String JOB_TOKEN_FILE="jobToken"; //localized file
-
- static final String JOB_LOCAL_DIR = MRJobConfig.JOB_LOCAL_DIR;
-
- private JobConf fConf;
- private FileSystem localFs;
-
- private Localizer localizer;
-
- private int maxMapSlots;
- private int maxReduceSlots;
- private int failures;
-
- private ACLsManager aclsManager;
-
- // Performance-related config knob to send an out-of-band heartbeat
- // on task completion
- private volatile boolean oobHeartbeatOnTaskCompletion;
-
- // Track number of completed tasks to send an out-of-band heartbeat
- private IntWritable finishedCount = new IntWritable(0);
-
- private MapEventsFetcherThread mapEventsFetcher;
- int workerThreads;
- CleanupQueue directoryCleanupThread;
- private volatile JvmManager jvmManager;
- UserLogCleaner taskLogCleanupThread;
- private TaskMemoryManagerThread taskMemoryManager;
- private boolean taskMemoryManagerEnabled = true;
- private long totalVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
- private long totalPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
- private long mapSlotMemorySizeOnTT = JobConf.DISABLED_MEMORY_LIMIT;
- private long reduceSlotSizeMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
- private long totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT;
- private long reservedPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
- private ResourceCalculatorPlugin resourceCalculatorPlugin = null;
-
- /**
- * the minimum interval between jobtracker polls
- */
- private volatile int heartbeatInterval =
- JTConfig.JT_HEARTBEAT_INTERVAL_MIN_DEFAULT;
- /**
-   * Number of map-task completion event locations to poll for at one time
- */
- private int probe_sample_size = 500;
-
- private IndexCache indexCache;
-
- private MRAsyncDiskService asyncDiskService;
-
- MRAsyncDiskService getAsyncDiskService() {
- return asyncDiskService;
- }
-
- void setAsyncDiskService(MRAsyncDiskService asyncDiskService) {
- this.asyncDiskService = asyncDiskService;
- }
-
- /**
- * Handle to the specific instance of the {@link TaskController} class
- */
- private TaskController taskController;
-
- /**
- * Handle to the specific instance of the {@link NodeHealthCheckerService}
- */
- private NodeHealthCheckerService healthChecker;
-
- /*
-   * A list of commitTaskActions for which a commit response has been received
- */
- private List<TaskAttemptID> commitResponses =
- Collections.synchronizedList(new ArrayList<TaskAttemptID>());
-
- private ShuffleServerMetrics shuffleServerMetrics;
-  /** This class contains the methods used for reporting the shuffle-specific
-   * metrics. The TaskTracker is actually a server for the shuffle, hence the
-   * name ShuffleServerMetrics.
- */
- class ShuffleServerMetrics implements Updater {
- private MetricsRecord shuffleMetricsRecord = null;
- private int serverHandlerBusy = 0;
- private long outputBytes = 0;
- private int failedOutputs = 0;
- private int successOutputs = 0;
- private int exceptionsCaught = 0;
- ShuffleServerMetrics(JobConf conf) {
- MetricsContext context = MetricsUtil.getContext("mapred");
- shuffleMetricsRecord =
- MetricsUtil.createRecord(context, "shuffleOutput");
- this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
- context.registerUpdater(this);
- }
- synchronized void serverHandlerBusy() {
- ++serverHandlerBusy;
- }
- synchronized void serverHandlerFree() {
- --serverHandlerBusy;
- }
- synchronized void outputBytes(long bytes) {
- outputBytes += bytes;
- }
- synchronized void failedOutput() {
- ++failedOutputs;
- }
- synchronized void successOutput() {
- ++successOutputs;
- }
- synchronized void exceptionsCaught() {
- ++exceptionsCaught;
- }
- public void doUpdates(MetricsContext unused) {
- synchronized (this) {
- if (workerThreads != 0) {
- shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent",
- 100*((float)serverHandlerBusy/workerThreads));
- } else {
- shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 0);
- }
- shuffleMetricsRecord.incrMetric("shuffle_output_bytes",
- outputBytes);
- shuffleMetricsRecord.incrMetric("shuffle_failed_outputs",
- failedOutputs);
- shuffleMetricsRecord.incrMetric("shuffle_success_outputs",
- successOutputs);
- shuffleMetricsRecord.incrMetric("shuffle_exceptions_caught",
- exceptionsCaught);
- outputBytes = 0;
- failedOutputs = 0;
- successOutputs = 0;
- exceptionsCaught = 0;
- }
- shuffleMetricsRecord.update();
- }
- }
-
-
- private TaskTrackerInstrumentation myInstrumentation = null;
-
- public TaskTrackerInstrumentation getTaskTrackerInstrumentation() {
- return myInstrumentation;
- }
-
- // Currently used only in tests
- void setTaskTrackerInstrumentation(
- TaskTrackerInstrumentation trackerInstrumentation) {
- myInstrumentation = trackerInstrumentation;
- }
-
- /**
-   * A queue of cleanup actions (kill-job / kill-task) waiting to be processed.
- */
- private BlockingQueue<TaskTrackerAction> tasksToCleanup =
- new LinkedBlockingQueue<TaskTrackerAction>();
-
- @Override
- public ProtocolSignature getProtocolSignature(String protocol,
- long clientVersion, int clientMethodsHash) throws IOException {
- return ProtocolSignature.getProtocolSignature(
- this, protocol, clientVersion, clientMethodsHash);
- }
-
- /**
-   * A daemon thread that pulls actions off the cleanup queue and processes them.
- */
- private Thread taskCleanupThread =
- new Thread(new Runnable() {
- public void run() {
- while (true) {
- try {
- TaskTrackerAction action = tasksToCleanup.take();
- if (action instanceof KillJobAction) {
- purgeJob((KillJobAction) action);
- } else if (action instanceof KillTaskAction) {
- processKillTaskAction((KillTaskAction) action);
- } else {
- LOG.error("Non-delete action given to cleanup thread: "
- + action);
- }
- } catch (Throwable except) {
- LOG.warn(StringUtils.stringifyException(except));
- }
- }
- }
- }, "taskCleanup");
-
- void processKillTaskAction(KillTaskAction killAction) throws IOException {
- TaskInProgress tip;
- synchronized (TaskTracker.this) {
- tip = tasks.get(killAction.getTaskID());
- }
- LOG.info("Received KillTaskAction for task: " +
- killAction.getTaskID());
- purgeTask(tip, false);
- }
-
- public TaskController getTaskController() {
- return taskController;
- }
-
- // Currently this is used only by tests
- void setTaskController(TaskController t) {
- taskController = t;
- }
-
- private RunningJob addTaskToJob(JobID jobId,
- TaskInProgress tip) {
- synchronized (runningJobs) {
- RunningJob rJob = null;
- if (!runningJobs.containsKey(jobId)) {
- rJob = new RunningJob(jobId);
- rJob.localized = false;
- rJob.tasks = new HashSet<TaskInProgress>();
- runningJobs.put(jobId, rJob);
- } else {
- rJob = runningJobs.get(jobId);
- }
- synchronized (rJob) {
- rJob.tasks.add(tip);
- }
- runningJobs.notify(); //notify the fetcher thread
- return rJob;
- }
- }
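-
-  // addTaskToJob() nudges the map-events fetcher via runningJobs.notify(),
-  // so a newly added reduce task starts receiving map completion events
-  // without waiting for the fetcher's next wakeup.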
-
- private void removeTaskFromJob(JobID jobId, TaskInProgress tip) {
- synchronized (runningJobs) {
- RunningJob rjob = runningJobs.get(jobId);
- if (rjob == null) {
- LOG.warn("Unknown job " + jobId + " being deleted.");
- } else {
- synchronized (rjob) {
- rjob.tasks.remove(tip);
- }
- }
- }
- }
-
- JobTokenSecretManager getJobTokenSecretManager() {
- return jobTokenSecretManager;
- }
-
- RunningJob getRunningJob(JobID jobId) {
- return runningJobs.get(jobId);
- }
-
- Localizer getLocalizer() {
- return localizer;
- }
-
- void setLocalizer(Localizer l) {
- localizer = l;
- }
-
- public static String getUserDir(String user) {
- return TaskTracker.SUBDIR + Path.SEPARATOR + user;
- }
-
- public static String getPrivateDistributedCacheDir(String user) {
- return getUserDir(user) + Path.SEPARATOR + TaskTracker.DISTCACHEDIR;
- }
-
- public static String getPublicDistributedCacheDir() {
- return TaskTracker.SUBDIR + Path.SEPARATOR + TaskTracker.DISTCACHEDIR;
- }
-
- public static String getJobCacheSubdir(String user) {
- return getUserDir(user) + Path.SEPARATOR + TaskTracker.JOBCACHE;
- }
-
- public static String getLocalJobDir(String user, String jobid) {
- return getJobCacheSubdir(user) + Path.SEPARATOR + jobid;
- }
-
- static String getLocalJobConfFile(String user, String jobid) {
- return getLocalJobDir(user, jobid) + Path.SEPARATOR + TaskTracker.JOBFILE;
- }
-
- static String getLocalJobTokenFile(String user, String jobid) {
- return getLocalJobDir(user, jobid) + Path.SEPARATOR + TaskTracker.JOB_TOKEN_FILE;
- }
-
-
- static String getTaskConfFile(String user, String jobid, String taskid,
- boolean isCleanupAttempt) {
- return getLocalTaskDir(user, jobid, taskid, isCleanupAttempt)
- + Path.SEPARATOR + TaskTracker.JOBFILE;
- }
-
- static String getJobJarsDir(String user, String jobid) {
- return getLocalJobDir(user, jobid) + Path.SEPARATOR + TaskTracker.JARSDIR;
- }
-
- static String getJobJarFile(String user, String jobid) {
- return getJobJarsDir(user, jobid) + Path.SEPARATOR + "job.jar";
- }
-
- static String getJobWorkDir(String user, String jobid) {
- return getLocalJobDir(user, jobid) + Path.SEPARATOR + MRConstants.WORKDIR;
- }
-
- static String getLocalSplitMetaFile(String user, String jobid, String taskid){
- return TaskTracker.getLocalTaskDir(user, jobid, taskid) + Path.SEPARATOR
- + TaskTracker.LOCAL_SPLIT_META_FILE;
- }
-
- static String getLocalSplitFile(String user, String jobid, String taskid) {
- return TaskTracker.getLocalTaskDir(user, jobid, taskid) + Path.SEPARATOR
- + TaskTracker.LOCAL_SPLIT_FILE;
- }
-
- static String getIntermediateOutputDir(String user, String jobid,
- String taskid) {
- return getLocalTaskDir(user, jobid, taskid) + Path.SEPARATOR
- + TaskTracker.OUTPUT;
- }
-
- static String getLocalTaskDir(String user, String jobid, String taskid) {
- return getLocalTaskDir(user, jobid, taskid, false);
- }
-
- public static String getLocalTaskDir(String user, String jobid, String taskid,
- boolean isCleanupAttempt) {
- String taskDir = getLocalJobDir(user, jobid) + Path.SEPARATOR + taskid;
- if (isCleanupAttempt) {
- taskDir = taskDir + TASK_CLEANUP_SUFFIX;
- }
- return taskDir;
- }
-
- static String getTaskWorkDir(String user, String jobid, String taskid,
- boolean isCleanupAttempt) {
- String dir = getLocalTaskDir(user, jobid, taskid, isCleanupAttempt);
- return dir + Path.SEPARATOR + MRConstants.WORKDIR;
- }
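-
-  // Putting the path helpers above together, the per-job layout under a
-  // local dir looks like this (illustrative example):
-  //
-  //   taskTracker/<user>/distcache/                  (private dist. cache)
-  //   taskTracker/<user>/jobcache/<jobid>/job.xml
-  //   taskTracker/<user>/jobcache/<jobid>/jars/job.jar
-  //   taskTracker/<user>/jobcache/<jobid>/<taskid>/work
-  //   taskTracker/<user>/jobcache/<jobid>/<taskid>/output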
-
- String getPid(TaskAttemptID tid) {
- TaskInProgress tip = tasks.get(tid);
- if (tip != null) {
- return jvmManager.getPid(tip.getTaskRunner());
- }
- return null;
- }
-
- public long getProtocolVersion(String protocol,
- long clientVersion) throws IOException {
- if (protocol.equals(TaskUmbilicalProtocol.class.getName())) {
- return TaskUmbilicalProtocol.versionID;
- } else {
- throw new IOException("Unknown protocol for task tracker: " +
- protocol);
- }
- }
-
-
- int getHttpPort() {
- return httpPort;
- }
-
- /**
- * Do the real constructor work here. It's in a separate method
- * so we can call it again and "recycle" the object after calling
- * close().
- */
- synchronized void initialize() throws IOException, InterruptedException {
-
- LOG.info("Starting tasktracker with owner as " +
- aclsManager.getMROwner().getShortUserName());
-
- localFs = FileSystem.getLocal(fConf);
- // use configured nameserver & interface to get local hostname
- if (fConf.get(TT_HOST_NAME) != null) {
- this.localHostname = fConf.get(TT_HOST_NAME);
- }
- if (localHostname == null) {
- this.localHostname =
- DNS.getDefaultHost
- (fConf.get(TT_DNS_INTERFACE,"default"),
- fConf.get(TT_DNS_NAMESERVER,"default"));
- }
-
- // Check local disk, start async disk service, and clean up all
- // local directories.
- checkLocalDirs(this.fConf.getLocalDirs());
- setAsyncDiskService(new MRAsyncDiskService(fConf));
- getAsyncDiskService().cleanupAllVolumes();
-
- // Clear out state tables
- this.tasks.clear();
- this.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
- this.runningJobs = new TreeMap<JobID, RunningJob>();
- this.mapTotal = 0;
- this.reduceTotal = 0;
- this.acceptNewTasks = true;
- this.status = null;
-
- this.minSpaceStart = this.fConf.getLong(TT_LOCAL_DIR_MINSPACE_START, 0L);
- this.minSpaceKill = this.fConf.getLong(TT_LOCAL_DIR_MINSPACE_KILL, 0L);
- //tweak the probe sample size (make it a function of numCopiers)
- probe_sample_size =
- this.fConf.getInt(TT_MAX_TASK_COMPLETION_EVENTS_TO_POLL, 500);
-
- // Set up TaskTracker instrumentation
- this.myInstrumentation = createInstrumentation(this, fConf);
-
- // bind address
- InetSocketAddress socAddr = NetUtils.createSocketAddr(
- fConf.get(TT_REPORT_ADDRESS, "127.0.0.1:0"));
- String bindAddress = socAddr.getHostName();
- int tmpPort = socAddr.getPort();
-
- this.jvmManager = new JvmManager(this);
-
- // RPC initialization
- int max = maxMapSlots > maxReduceSlots ?
- maxMapSlots : maxReduceSlots;
- //set the num handlers to max*2 since canCommit may wait for the duration
- //of a heartbeat RPC
- this.taskReportServer = RPC.getServer(this.getClass(), this, bindAddress,
- tmpPort, 2 * max, false, this.fConf, this.jobTokenSecretManager);
-
- // Set service-level authorization security policy
- if (this.fConf.getBoolean(
- CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
- PolicyProvider policyProvider =
- (PolicyProvider)(ReflectionUtils.newInstance(
- this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
- MapReducePolicyProvider.class, PolicyProvider.class),
- this.fConf));
- this.taskReportServer.refreshServiceAcl(fConf, policyProvider);
- }
-
- this.taskReportServer.start();
-
- // get the assigned address
- this.taskReportAddress = taskReportServer.getListenerAddress();
- this.fConf.set(TT_REPORT_ADDRESS,
- taskReportAddress.getHostName() + ":" + taskReportAddress.getPort());
- LOG.info("TaskTracker up at: " + this.taskReportAddress);
-
- this.taskTrackerName = "tracker_" + localHostname + ":" + taskReportAddress;
- LOG.info("Starting tracker " + taskTrackerName);
-
- Class<? extends TaskController> taskControllerClass = fConf.getClass(
- TT_TASK_CONTROLLER, DefaultTaskController.class, TaskController.class);
- taskController = (TaskController) ReflectionUtils.newInstance(
- taskControllerClass, fConf);
-
-
- // setup and create jobcache directory with appropriate permissions
- taskController.setup();
-
- // Initialize DistributedCache
- this.distributedCacheManager =
- new TrackerDistributedCacheManager(this.fConf, taskController,
- asyncDiskService);
- this.distributedCacheManager.startCleanupThread();
-
- this.jobClient = (InterTrackerProtocol)
- UserGroupInformation.getLoginUser().doAs(
- new PrivilegedExceptionAction<Object>() {
- public Object run() throws IOException {
- return RPC.waitForProxy(InterTrackerProtocol.class,
- InterTrackerProtocol.versionID,
- jobTrackAddr, fConf);
- }
- });
- this.justInited = true;
- this.running = true;
- // start the thread that will fetch map task completion events
- this.mapEventsFetcher = new MapEventsFetcherThread();
- mapEventsFetcher.setDaemon(true);
- mapEventsFetcher.setName(
- "Map-events fetcher for all reduce tasks " + "on " +
- taskTrackerName);
- mapEventsFetcher.start();
-
- Class<? extends ResourceCalculatorPlugin> clazz =
- fConf.getClass(TT_RESOURCE_CALCULATOR_PLUGIN,
- null, ResourceCalculatorPlugin.class);
- resourceCalculatorPlugin = ResourceCalculatorPlugin
- .getResourceCalculatorPlugin(clazz, fConf);
- LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculatorPlugin);
- initializeMemoryManagement();
-
- setIndexCache(new IndexCache(this.fConf));
-
- //clear old user logs
- taskLogCleanupThread.clearOldUserLogs(this.fConf);
-
- mapLauncher = new TaskLauncher(TaskType.MAP, maxMapSlots);
- reduceLauncher = new TaskLauncher(TaskType.REDUCE, maxReduceSlots);
- mapLauncher.start();
- reduceLauncher.start();
-
- // create a localizer instance
- setLocalizer(new Localizer(localFs, fConf.getLocalDirs(), taskController));
-
- //Start up node health checker service.
- if (shouldStartHealthMonitor(this.fConf)) {
- startHealthMonitor(this.fConf);
- }
-
- oobHeartbeatOnTaskCompletion =
- fConf.getBoolean(TT_OUTOFBAND_HEARBEAT, false);
- }
-
- /**
-   * Are ACLs for authorization checks enabled on the MR cluster?
- */
- boolean areACLsEnabled() {
- return fConf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
- }
-
- public static Class<?>[] getInstrumentationClasses(Configuration conf) {
- return conf.getClasses(TT_INSTRUMENTATION, TaskTrackerMetricsInst.class);
- }
-
- public static void setInstrumentationClass(
- Configuration conf, Class<? extends TaskTrackerInstrumentation> t) {
- conf.setClass(TT_INSTRUMENTATION,
- t, TaskTrackerInstrumentation.class);
- }
-
- public static TaskTrackerInstrumentation createInstrumentation(
- TaskTracker tt, Configuration conf) {
- try {
- Class<?>[] instrumentationClasses = getInstrumentationClasses(conf);
- if (instrumentationClasses.length == 0) {
- LOG.error("Empty string given for " + TT_INSTRUMENTATION +
- " property -- will use default instrumentation class instead");
- return new TaskTrackerMetricsInst(tt);
- } else if (instrumentationClasses.length == 1) {
- // Just one instrumentation class given; create it directly
- Class<?> cls = instrumentationClasses[0];
- java.lang.reflect.Constructor<?> c =
- cls.getConstructor(new Class[] {TaskTracker.class} );
- return (TaskTrackerInstrumentation) c.newInstance(tt);
- } else {
- // Multiple instrumentation classes given; use a composite object
- List<TaskTrackerInstrumentation> instrumentations =
- new ArrayList<TaskTrackerInstrumentation>();
- for (Class<?> cls: instrumentationClasses) {
- java.lang.reflect.Constructor<?> c =
- cls.getConstructor(new Class[] {TaskTracker.class} );
- TaskTrackerInstrumentation inst =
- (TaskTrackerInstrumentation) c.newInstance(tt);
- instrumentations.add(inst);
- }
- return new CompositeTaskTrackerInstrumentation(tt, instrumentations);
- }
- } catch(Exception e) {
- // Reflection can throw lots of exceptions -- handle them all by
- // falling back on the default.
- LOG.error("Failed to initialize TaskTracker metrics", e);
- return new TaskTrackerMetricsInst(tt);
- }
- }
-
- /**
- * Removes all contents of temporary storage. Called upon
-   * startup, to remove any leftovers from a previous run.
- *
- * Use MRAsyncDiskService.moveAndDeleteAllVolumes instead.
- * @see org.apache.hadoop.mapreduce.util.MRAsyncDiskService#cleanupAllVolumes()
- */
- @Deprecated
- public void cleanupStorage() throws IOException {
- this.fConf.deleteLocalFiles();
- }
-
-  // Object on which the MapEventsFetcherThread is going to wait.
- private Object waitingOn = new Object();
-
- private class MapEventsFetcherThread extends Thread {
-
- private List <FetchStatus> reducesInShuffle() {
- List <FetchStatus> fList = new ArrayList<FetchStatus>();
- for (Map.Entry <JobID, RunningJob> item : runningJobs.entrySet()) {
- RunningJob rjob = item.getValue();
- JobID jobId = item.getKey();
- FetchStatus f;
- synchronized (rjob) {
- f = rjob.getFetchStatus();
- for (TaskInProgress tip : rjob.tasks) {
- Task task = tip.getTask();
- if (!task.isMapTask()) {
- if (((ReduceTask)task).getPhase() ==
- TaskStatus.Phase.SHUFFLE) {
- if (rjob.getFetchStatus() == null) {
- //this is a new job; we start fetching its map events
- f = new FetchStatus(jobId,
- ((ReduceTask)task).getNumMaps());
- rjob.setFetchStatus(f);
- }
- f = rjob.getFetchStatus();
- fList.add(f);
-                break; //no need to check any more tasks belonging to this job
- }
- }
- }
- }
- }
-      //at this point, we know for which of the running jobs we need to
-      //query the jobtracker for map outputs (actually map events).
- return fList;
- }
-
- @Override
- public void run() {
- LOG.info("Starting thread: " + this.getName());
-
- while (running) {
- try {
- List <FetchStatus> fList = null;
- synchronized (runningJobs) {
- while (((fList = reducesInShuffle()).size()) == 0) {
- try {
- runningJobs.wait();
- } catch (InterruptedException e) {
- LOG.info("Shutting down: " + this.getName());
- return;
- }
- }
- }
- // now fetch all the map task events for all the reduce tasks
- // possibly belonging to different jobs
- boolean fetchAgain = false; //flag signifying whether we want to fetch
- //immediately again.
- for (FetchStatus f : fList) {
- long currentTime = System.currentTimeMillis();
- try {
- //the method below will return true when we have not
- //fetched all available events yet
- if (f.fetchMapCompletionEvents(currentTime)) {
- fetchAgain = true;
- }
- } catch (Exception e) {
- LOG.warn(
- "Ignoring exception that fetch for map completion" +
- " events threw for " + f.jobId + " threw: " +
- StringUtils.stringifyException(e));
- }
- if (!running) {
- break;
- }
- }
- synchronized (waitingOn) {
- try {
- if (!fetchAgain) {
- waitingOn.wait(heartbeatInterval);
- }
- } catch (InterruptedException ie) {
- LOG.info("Shutting down: " + this.getName());
- return;
- }
- }
- } catch (Exception e) {
- LOG.info("Ignoring exception " + e.getMessage());
- }
- }
- }
- }
-
- private class FetchStatus {
-    /** The next event ID from which we will start querying the JobTracker */
- private IntWritable fromEventId;
- /** This is the cache of map events for a given job */
- private List<TaskCompletionEvent> allMapEvents;
-    /** The job ID this FetchStatus object is for */
- private JobID jobId;
- private long lastFetchTime;
- private boolean fetchAgain;
-
- public FetchStatus(JobID jobId, int numMaps) {
- this.fromEventId = new IntWritable(0);
- this.jobId = jobId;
- this.allMapEvents = new ArrayList<TaskCompletionEvent>(numMaps);
- }
-
- /**
- * Reset the events obtained so far.
- */
- public void reset() {
- // Note that the sync is first on fromEventId and then on allMapEvents
- synchronized (fromEventId) {
- synchronized (allMapEvents) {
- fromEventId.set(0); // set the new index for TCE
- allMapEvents.clear();
- }
- }
- }
-
- public TaskCompletionEvent[] getMapEvents(int fromId, int max) {
-
- TaskCompletionEvent[] mapEvents =
- TaskCompletionEvent.EMPTY_ARRAY;
- boolean notifyFetcher = false;
- synchronized (allMapEvents) {
- if (allMapEvents.size() > fromId) {
- int actualMax = Math.min(max, (allMapEvents.size() - fromId));
- List <TaskCompletionEvent> eventSublist =
- allMapEvents.subList(fromId, actualMax + fromId);
- mapEvents = eventSublist.toArray(mapEvents);
- } else {
- // Notify Fetcher thread.
- notifyFetcher = true;
- }
- }
- if (notifyFetcher) {
- synchronized (waitingOn) {
- waitingOn.notify();
- }
- }
- return mapEvents;
- }
-
- public boolean fetchMapCompletionEvents(long currTime) throws IOException {
- if (!fetchAgain && (currTime - lastFetchTime) < heartbeatInterval) {
- return false;
- }
- int currFromEventId = 0;
- synchronized (fromEventId) {
- currFromEventId = fromEventId.get();
- List <TaskCompletionEvent> recentMapEvents =
- queryJobTracker(fromEventId, jobId, jobClient);
- synchronized (allMapEvents) {
- allMapEvents.addAll(recentMapEvents);
- }
- lastFetchTime = currTime;
- if (fromEventId.get() - currFromEventId >= probe_sample_size) {
-          //return true when we have fetched the full payload, indicating
-          //that we should fetch again immediately (there might be more to
-          //fetch)
- fetchAgain = true;
- return true;
- }
- }
- fetchAgain = false;
- return false;
- }
- }
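-
-  // Fetch pacing, summarizing the logic above: map completion events are
-  // re-fetched at most once per heartbeatInterval, except when a full batch
-  // of probe_sample_size events came back, in which case the fetcher retries
-  // immediately since more events are likely pending.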
-
- private static LocalDirAllocator lDirAlloc =
- new LocalDirAllocator(MRConfig.LOCAL_DIR);
-
-  // initialize the job directory
- RunningJob localizeJob(TaskInProgress tip
- ) throws IOException, InterruptedException {
- Task t = tip.getTask();
- JobID jobId = t.getJobID();
- RunningJob rjob = addTaskToJob(jobId, tip);
-
- // Initialize the user directories if needed.
- getLocalizer().initializeUserDirs(t.getUser());
-
- synchronized (rjob) {
- if (!rjob.localized) {
-
- JobConf localJobConf = localizeJobFiles(t, rjob);
- // initialize job log directory
- initializeJobLogDir(jobId, localJobConf);
-
- // Now initialize the job via task-controller so as to set
- // ownership/permissions of jars, job-work-dir. Note that initializeJob
-      // should be the last call, made after every other directory/file
-      // directly under the job directory has been created.
- JobInitializationContext context = new JobInitializationContext();
- context.jobid = jobId;
- context.user = t.getUser();
- context.workDir = new File(localJobConf.get(JOB_LOCAL_DIR));
- taskController.initializeJob(context);
-
- rjob.jobConf = localJobConf;
- rjob.keepJobFiles = ((localJobConf.getKeepTaskFilesPattern() != null) ||
- localJobConf.getKeepFailedTaskFiles());
- rjob.localized = true;
- }
- }
- return rjob;
- }
-
- private FileSystem getFS(final Path filePath, JobID jobId,
- final Configuration conf) throws IOException, InterruptedException {
- RunningJob rJob = runningJobs.get(jobId);
- FileSystem userFs =
- rJob.ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
- public FileSystem run() throws IOException {
- return filePath.getFileSystem(conf);
- }});
- return userFs;
- }
-
- /**
- * Localize the job on this tasktracker. Specifically
- * <ul>
- * <li>Cleanup and create job directories on all disks</li>
- * <li>Download the job config file job.xml from the FS</li>
- * <li>Create the job work directory and set {@link TaskTracker#JOB_LOCAL_DIR}
-   * in the configuration.</li>
- * <li>Download the job jar file job.jar from the FS, unjar it and set jar
- * file in the configuration.</li>
- * </ul>
- *
- * @param t task whose job has to be localized on this TT
- * @return the modified job configuration to be used for all the tasks of this
- * job as a starting point.
- * @throws IOException
- */
- JobConf localizeJobFiles(Task t, RunningJob rjob)
- throws IOException, InterruptedException {
- JobID jobId = t.getJobID();
- String userName = t.getUser();
-
- // Initialize the job directories
- FileSystem localFs = FileSystem.getLocal(fConf);
- getLocalizer().initializeJobDirs(userName, jobId);
- // save local copy of JobToken file
- String localJobTokenFile = localizeJobTokenFile(t.getUser(), jobId);
- rjob.ugi = UserGroupInformation.createRemoteUser(t.getUser());
-
- Credentials ts = TokenCache.loadTokens(localJobTokenFile, fConf);
- Token<JobTokenIdentifier> jt = TokenCache.getJobToken(ts);
- if (jt != null) { //could be null in the case of some unit tests
- getJobTokenSecretManager().addTokenForJob(jobId.toString(), jt);
- }
- for (Token<? extends TokenIdentifier> token : ts.getAllTokens()) {
- rjob.ugi.addToken(token);
- }
- // Download the job.xml for this job from the system FS
- Path localJobFile =
- localizeJobConfFile(new Path(t.getJobFile()), userName, jobId);
-
- JobConf localJobConf = new JobConf(localJobFile);
- //WE WILL TRUST THE USERNAME THAT WE GOT FROM THE JOBTRACKER
- //AS PART OF THE TASK OBJECT
- localJobConf.setUser(userName);
-
- // set the location of the token file into jobConf to transfer
- // the name to TaskRunner
- localJobConf.set(TokenCache.JOB_TOKENS_FILENAME,
- localJobTokenFile);
-
- // create the 'job-work' directory: job-specific shared directory for use as
- // scratch space by all tasks of the same job running on this TaskTracker.
- Path workDir =
- lDirAlloc.getLocalPathForWrite(getJobWorkDir(userName, jobId
- .toString()), fConf);
- if (!localFs.mkdirs(workDir)) {
- throw new IOException("Mkdirs failed to create "
- + workDir.toString());
- }
- System.setProperty(JOB_LOCAL_DIR, workDir.toUri().getPath());
- localJobConf.set(JOB_LOCAL_DIR, workDir.toUri().getPath());
- // Download the job.jar for this job from the system FS
- localizeJobJarFile(userName, jobId, localFs, localJobConf);
-
- return localJobConf;
- }
-
- // Create job userlog dir.
- // Create job acls file in job log dir, if needed.
- void initializeJobLogDir(JobID jobId, JobConf localJobConf)
- throws IOException {
- // remove it from tasklog cleanup thread first,
- // it might be added there because of tasktracker reinit or restart
- taskLogCleanupThread.unmarkJobFromLogDeletion(jobId);
- localizer.initializeJobLogDir(jobId);
-
- if (areACLsEnabled()) {
- // Create job-acls.xml file in job userlog dir and write the needed
- // info for authorization of users for viewing task logs of this job.
- writeJobACLs(localJobConf, TaskLog.getJobDir(jobId));
- }
- }
-
- /**
- * Creates job-acls.xml under the given directory logDir and writes
- * job-view-acl, queue-admins-acl, jobOwner name and queue name into this
- * file.
-   * The queue name is the queue to which the job was submitted.
-   * The queue-admins-acl is the admins ACL of the queue to which the
-   * job was submitted.
- * @param conf job configuration
- * @param logDir job userlog dir
- * @throws IOException
- */
- private static void writeJobACLs(JobConf conf, File logDir)
- throws IOException {
- File aclFile = new File(logDir, jobACLsFile);
- JobConf aclConf = new JobConf(false);
-
- // set the job view acl in aclConf
- String jobViewACL = conf.get(MRJobConfig.JOB_ACL_VIEW_JOB, " ");
- aclConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, jobViewACL);
-
- // set the job queue name in aclConf
- String queue = conf.getQueueName();
- aclConf.setQueueName(queue);
-
- // set the queue admins acl in aclConf
- String qACLName = toFullPropertyName(queue,
- QueueACL.ADMINISTER_JOBS.getAclName());
- String queueAdminsACL = conf.get(qACLName, " ");
- aclConf.set(qACLName, queueAdminsACL);
-
- // set jobOwner as user.name in aclConf
- String jobOwner = conf.getUser();
- aclConf.set("user.name", jobOwner);
-
- FileOutputStream out;
- try {
- out = SecureIOUtils.createForWrite(aclFile, 0600);
- } catch (SecureIOUtils.AlreadyExistsException aee) {
- LOG.warn("Job ACL file already exists at " + aclFile, aee);
- return;
- }
- try {
- aclConf.writeXml(out);
- } finally {
- out.close();
- }
- }
-
- /**
- * Download the job configuration file from the FS.
- *
-   * @param jobFile path of the job configuration file on the FS
-   * @param user the job owner's user name
-   * @param jobId jobid of the job whose conf file is downloaded
- * @return the local file system path of the downloaded file.
- * @throws IOException
- */
- private Path localizeJobConfFile(Path jobFile, String user, JobID jobId)
- throws IOException, InterruptedException {
- final JobConf conf = new JobConf(getJobConf());
- FileSystem userFs = getFS(jobFile, jobId, conf);
-    // Get the size of the job file;
-    // the size is -1 if the file is not present.
- FileStatus status = null;
- long jobFileSize = -1;
- try {
- status = userFs.getFileStatus(jobFile);
- jobFileSize = status.getLen();
-    } catch (FileNotFoundException fe) {
- jobFileSize = -1;
- }
-
- Path localJobFile =
- lDirAlloc.getLocalPathForWrite(getLocalJobConfFile(user, jobId.toString()),
- jobFileSize, fConf);
-
- // Download job.xml
- userFs.copyToLocalFile(jobFile, localJobFile);
- return localJobFile;
- }
-
- /**
- * Download the job jar file from FS to the local file system and unjar it.
- * Set the local jar file in the passed configuration.
- *
-   * @param user
-   * @param jobId
-   * @param localFs
-   * @param localJobConf
- * @throws IOException
- */
- private void localizeJobJarFile(String user, JobID jobId, FileSystem localFs,
- JobConf localJobConf)
- throws IOException, InterruptedException {
- // copy Jar file to the local FS and unjar it.
- String jarFile = localJobConf.getJar();
- FileStatus status = null;
- long jarFileSize = -1;
- if (jarFile != null) {
- Path jarFilePath = new Path(jarFile);
- FileSystem fs = getFS(jarFilePath, jobId, localJobConf);
- try {
- status = fs.getFileStatus(jarFilePath);
- jarFileSize = status.getLen();
- } catch (FileNotFoundException fe) {
- jarFileSize = -1;
- }
-      // Reserve five times jarFileSize to allow for
-      // unjarring the jar file in the jars directory
- Path localJarFile =
- lDirAlloc.getLocalPathForWrite(
- getJobJarFile(user, jobId.toString()), 5 * jarFileSize, fConf);
-
- // Download job.jar
- fs.copyToLocalFile(jarFilePath, localJarFile);
-
- localJobConf.setJar(localJarFile.toString());
-
- // Un-jar the parts of the job.jar that need to be added to the classpath
- RunJar.unJar(
- new File(localJarFile.toString()),
- new File(localJarFile.getParent().toString()),
- localJobConf.getJarUnpackPattern());
- }
- }
-
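-  // Hand the localized job conf and the owner's UGI to the TIP and launch
-  // the task under the TIP's lock.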
- protected void launchTaskForJob(TaskInProgress tip, JobConf jobConf,
- UserGroupInformation ugi) throws IOException {
- synchronized (tip) {
- tip.setJobConf(jobConf);
- tip.setUGI(ugi);
- tip.launchTask();
- }
- }
-
- public synchronized void shutdown() throws IOException {
- shuttingDown = true;
- close();
- if (this.server != null) {
- try {
- LOG.info("Shutting down StatusHttpServer");
- this.server.stop();
- } catch (Exception e) {
- LOG.warn("Exception shutting down TaskTracker", e);
- }
- }
- }
-
-  /**
-   * Close down the TaskTracker and all its components. We must also shut down
-   * any running tasks or threads, and clean up disk space. A new TaskTracker
- * within the same process space might be restarted, so everything must be
- * clean.
- */
- public synchronized void close() throws IOException {
-    //
-    // Kill running tasks. Copy them into a separate map, 'tasksToClose',
-    // because calling jobHasFinished() may result in an edit to 'tasks'.
-    //
- TreeMap<TaskAttemptID, TaskInProgress> tasksToClose =
- new TreeMap<TaskAttemptID, TaskInProgress>();
- tasksToClose.putAll(tasks);
- for (TaskInProgress tip : tasksToClose.values()) {
- tip.jobHasFinished(false);
- }
-
- this.running = false;
-
- if (asyncDiskService != null) {
- // Clear local storage
- asyncDiskService.cleanupAllVolumes();
-
- // Shutdown all async deletion threads with up to 10 seconds of delay
- asyncDiskService.shutdown();
- try {
- if (!asyncDiskService.awaitTermination(10000)) {
- asyncDiskService.shutdownNow();
- asyncDiskService = null;
- }
- } catch (InterruptedException e) {
- asyncDiskService.shutdownNow();
- asyncDiskService = null;
- }
- }
-
- // Shutdown the fetcher thread
- this.mapEventsFetcher.interrupt();
-
- //stop the launchers
- this.mapLauncher.interrupt();
- this.reduceLauncher.interrupt();
-
- this.distributedCacheManager.stopCleanupThread();
- jvmManager.stop();
-
- // shutdown RPC connections
- RPC.stopProxy(jobClient);
-
- // wait for the fetcher thread to exit
- for (boolean done = false; !done; ) {
- try {
- this.mapEventsFetcher.join();
- done = true;
- } catch (InterruptedException e) {
- }
- }
-
- if (taskReportServer != null) {
- taskReportServer.stop();
- taskReportServer = null;
- }
- if (healthChecker != null) {
- //stop node health checker service
- healthChecker.stop();
- healthChecker = null;
- }
- }
-
- /**
- * For testing
- */
- TaskTracker() {
- server = null;
- }
-
- void setConf(JobConf conf) {
- fConf = conf;
- }
-
- /**
- * Start with the local machine name, and the default JobTracker
- */
- public TaskTracker(JobConf conf) throws IOException, InterruptedException {
- fConf = conf;
- maxMapSlots = conf.getInt(TT_MAP_SLOTS, 2);
- maxReduceSlots = conf.getInt(TT_REDUCE_SLOTS, 2);
- aclsManager = new ACLsManager(fConf, new JobACLsManager(fConf), null);
- this.jobTrackAddr = JobTracker.getAddress(conf);
- InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
- conf.get(TT_HTTP_ADDRESS, "0.0.0.0:50060"));
- String httpBindAddress = infoSocAddr.getHostName();
- int httpPort = infoSocAddr.getPort();
- this.server = new HttpServer("task", httpBindAddress, httpPort,
- httpPort == 0, conf, aclsManager.getAdminsAcl());
- workerThreads = conf.getInt(TT_HTTP_THREADS, 40);
- this.shuffleServerMetrics = new ShuffleServerMetrics(conf);
- server.setThreads(1, workerThreads);
- // let the jsp pages get to the task tracker, config, and other relevant
- // objects
- FileSystem local = FileSystem.getLocal(conf);
- this.localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
- server.setAttribute("task.tracker", this);
- server.setAttribute("local.file.system", local);
- server.setAttribute("conf", conf);
- server.setAttribute("log", LOG);
- server.setAttribute("localDirAllocator", localDirAllocator);
- server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);
- String exceptionStackRegex = conf.get(JTConfig.SHUFFLE_EXCEPTION_STACK_REGEX);
- String exceptionMsgRegex = conf.get(JTConfig.SHUFFLE_EXCEPTION_MSG_REGEX);
- server.setAttribute("exceptionStackRegex", exceptionStackRegex);
- server.setAttribute("exceptionMsgRegex", exceptionMsgRegex);
- server.addInternalServlet("mapOutput", "/mapOutput", MapOutputServlet.class);
- server.addServlet("taskLog", "/tasklog", TaskLogServlet.class);
- server.start();
- this.httpPort = server.getPort();
- checkJettyPort(httpPort);
- // create task log cleanup thread
- setTaskLogCleanupThread(new UserLogCleaner(fConf));
-
- UserGroupInformation.setConfiguration(fConf);
- SecurityUtil.login(fConf, TTConfig.TT_KEYTAB_FILE, TTConfig.TT_USER_NAME);
-
- initialize();
- }
-
- private void checkJettyPort(int port) throws IOException {
- //See HADOOP-4744
- if (port < 0) {
- shuttingDown = true;
- throw new IOException("Jetty problem. Jetty didn't bind to a " +
- "valid port");
- }
- }
-
- private void startCleanupThreads() throws IOException {
- taskCleanupThread.setDaemon(true);
- taskCleanupThread.start();
- directoryCleanupThread = new CleanupQueue();
- // start tasklog cleanup thread
- taskLogCleanupThread.setDaemon(true);
- taskLogCleanupThread.start();
- }
-
- // only used by tests
- void setCleanupThread(CleanupQueue c) {
- directoryCleanupThread = c;
- }
-
- CleanupQueue getCleanupThread() {
- return directoryCleanupThread;
- }
-
- UserLogCleaner getTaskLogCleanupThread() {
- return this.taskLogCleanupThread;
- }
-
- void setTaskLogCleanupThread(UserLogCleaner t) {
- this.taskLogCleanupThread = t;
- }
-
- void setIndexCache(IndexCache cache) {
- this.indexCache = cache;
- }
-
- /**
- * The connection to the JobTracker, used by the TaskRunner
- * for locating remote files.
- */
- public InterTrackerProtocol getJobClient() {
- return jobClient;
- }
-
-  /** Return the address to which the tasktracker is bound */
- public synchronized InetSocketAddress getTaskTrackerReportAddress() {
- return taskReportAddress;
- }
-
-  /** Queries the job tracker for a set of map outputs ready to be copied
-   * @param fromEventId the first event ID we want to start from; this is
-   * modified by the call to this method
-   * @param jobId the id of the job whose events are queried
-   * @param jobClient the job tracker
-   * @return the list of recent map-task completion events fetched
-   * @throws IOException
-   */
- private List<TaskCompletionEvent> queryJobTracker(IntWritable fromEventId,
- JobID jobId,
- InterTrackerProtocol jobClient)
- throws IOException {
-
-    TaskCompletionEvent[] t = jobClient.getTaskCompletionEvents(
- jobId,
- fromEventId.get(),
- probe_sample_size);
- //we are interested in map task completion events only. So store
- //only those
- List <TaskCompletionEvent> recentMapEvents =
- new ArrayList<TaskCompletionEvent>();
- for (int i = 0; i < t.length; i++) {
- if (t[i].isMapTask()) {
- recentMapEvents.add(t[i]);
- }
- }
- fromEventId.set(fromEventId.get() + t.length);
- return recentMapEvents;
- }
-
-  /**
-   * Main service loop. Loops until the TaskTracker is shut down, is told to
-   * reinitialize, or is denied by the JobTracker.
-   */
- State offerService() throws Exception {
- long lastHeartbeat = 0;
-
- while (running && !shuttingDown) {
- try {
- long now = System.currentTimeMillis();
-
- long waitTime = heartbeatInterval - (now - lastHeartbeat);
- if (waitTime > 0) {
-          // sleep for the wait time, or until a finished task
-          // frees slots and triggers an out-of-band heartbeat
- synchronized (finishedCount) {
- if (finishedCount.get() == 0) {
- finishedCount.wait(waitTime);
- }
- finishedCount.set(0);
- }
- }
-
- // If the TaskTracker is just starting up:
- // 1. Verify the buildVersion
- // 2. Get the system directory & filesystem
-        if (justInited) {
-          String jobTrackerBV = jobClient.getBuildVersion();
-          if (!VersionInfo.getBuildVersion().equals(jobTrackerBV)) {
- String msg = "Shutting down. Incompatible buildVersion." +
- "\nJobTracker's: " + jobTrackerBV +
- "\nTaskTracker's: "+ VersionInfo.getBuildVersion();
- LOG.error(msg);
- try {
- jobClient.reportTaskTrackerError(taskTrackerName, null, msg);
-            } catch (Exception e) {
- LOG.info("Problem reporting to jobtracker: " + e);
- }
- return State.DENIED;
- }
-
- String dir = jobClient.getSystemDir();
- if (dir == null) {
- throw new IOException("Failed to get system directory");
- }
- systemDirectory = new Path(dir);
- systemFS = systemDirectory.getFileSystem(fConf);
- }
-
- // Send the heartbeat and process the jobtracker's directives
- HeartbeatResponse heartbeatResponse = transmitHeartBeat(now);
-
-        // Note the time when the heartbeat returned; use this to decide when
-        // to send the next heartbeat
- lastHeartbeat = System.currentTimeMillis();
-
- TaskTrackerAction[] actions = heartbeatResponse.getActions();
-        if (LOG.isDebugEnabled()) {
- LOG.debug("Got heartbeatResponse from JobTracker with responseId: " +
- heartbeatResponse.getResponseId() + " and " +
- ((actions != null) ? actions.length : 0) + " actions");
- }
- if (reinitTaskTracker(actions)) {
- return State.STALE;
- }
-
- // resetting heartbeat interval from the response.
- heartbeatInterval = heartbeatResponse.getHeartbeatInterval();
- justStarted = false;
- justInited = false;
-        if (actions != null) {
-          for (TaskTrackerAction action : actions) {
- if (action instanceof LaunchTaskAction) {
- addToTaskQueue((LaunchTaskAction)action);
- } else if (action instanceof CommitTaskAction) {
- CommitTaskAction commitAction = (CommitTaskAction)action;
- if (!commitResponses.contains(commitAction.getTaskID())) {
- LOG.info("Received commit task action for " +
- commitAction.getTaskID());
- commitResponses.add(commitAction.getTaskID());
- }
- } else {
- tasksToCleanup.put(action);
- }
- }
- }
- markUnresponsiveTasks();
- killOverflowingTasks();
-
- //we've cleaned up, resume normal operation
- if (!acceptNewTasks && isIdle()) {
-          acceptNewTasks = true;
- }
-        //The check below may not be required every iteration but we are
-        //erring on the side of caution here. We have seen many cases where
-        //the call to jetty's getLocalPort() returns different values at
-        //different times, so we are being deliberately paranoid here.
- checkJettyPort(server.getPort());
- } catch (InterruptedException ie) {
- LOG.info("Interrupted. Closing down.");
- return State.INTERRUPTED;
- } catch (DiskErrorException de) {
- String msg = "Exiting task tracker for disk error:\n" +
- StringUtils.stringifyException(de);
- LOG.error(msg);
- synchronized (this) {
- jobClient.reportTaskTrackerError(taskTrackerName,
- "DiskErrorException", msg);
- }
- return State.STALE;
- } catch (RemoteException re) {
- String reClass = re.getClassName();
- if (DisallowedTaskTrackerException.class.getName().equals(reClass)) {
- LOG.info("Tasktracker disallowed by JobTracker.");
- return State.DENIED;
- }
- } catch (Exception except) {
- String msg = "Caught exception: " +
- StringUtils.stringifyException(except);
- LOG.error(msg);
- }
- }
-
- return State.NORMAL;
- }
-
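-  // Time of the last heartbeat that carried the full set of task counters.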
- private long previousUpdate = 0;
-
- /**
-   * Build and transmit the heartbeat to the JobTracker
- * @param now current time
-   * @return the heartbeat response from the JobTracker
- * @throws IOException
- */
- HeartbeatResponse transmitHeartBeat(long now) throws IOException {
- // Send Counters in the status once every COUNTER_UPDATE_INTERVAL
- boolean sendAllCounters;
- if (now > (previousUpdate + COUNTER_UPDATE_INTERVAL)) {
- sendAllCounters = true;
- previousUpdate = now;
-    } else {
-      sendAllCounters = false;
-    }
-
- //
- // Check if the last heartbeat got through...
- // if so then build the heartbeat information for the JobTracker;
- // else resend the previous status information.
- //
- if (status == null) {
- synchronized (this) {
- status = new TaskTrackerStatus(taskTrackerName, localHostname,
- httpPort,
- cloneAndResetRunningTaskStatuses(
- sendAllCounters),
- failures,
- maxMapSlots,
- maxReduceSlots);
- }
- } else {
- LOG.info("Resending 'status' to '" + jobTrackAddr.getHostName() +
- "' with reponseId '" + heartbeatResponseId);
- }
-
- //
- // Check if we should ask for a new Task
- //
- boolean askForNewTask;
- long localMinSpaceStart;
- synchronized (this) {
- askForNewTask =
- ((status.countOccupiedMapSlots() < maxMapSlots ||
- status.countOccupiedReduceSlots() < maxReduceSlots) &&
- acceptNewTasks);
- localMinSpaceStart = minSpaceStart;
- }
- if (askForNewTask) {
- checkLocalDirs(fConf.getLocalDirs());
- askForNewTask = enoughFreeSpace(localMinSpaceStart);
- long freeDiskSpace = getFreeSpace();
- long totVmem = getTotalVirtualMemoryOnTT();
- long totPmem = getTotalPhysicalMemoryOnTT();
- long availableVmem = getAvailableVirtualMemoryOnTT();
- long availablePmem = getAvailablePhysicalMemoryOnTT();
- long cumuCpuTime = getCumulativeCpuTimeOnTT();
- long cpuFreq = getCpuFrequencyOnTT();
- int numCpu = getNumProcessorsOnTT();
- float cpuUsage = getCpuUsageOnTT();
-
- status.getResourceStatus().setAvailableSpace(freeDiskSpace);
- status.getResourceStatus().setTotalVirtualMemory(totVmem);
- status.getResourceStatus().setTotalPhysicalMemory(totPmem);
- status.getResourceStatus().setMapSlotMemorySizeOnTT(
- mapSlotMemorySizeOnTT);
- status.getResourceStatus().setReduceSlotMemorySizeOnTT(
- reduceSlotSizeMemoryOnTT);
- status.getResourceStatus().setAvailableVirtualMemory(availableVmem);
- status.getResourceStatus().setAvailablePhysicalMemory(availablePmem);
- status.getResourceStatus().setCumulativeCpuTime(cumuCpuTime);
- status.getResourceStatus().setCpuFrequency(cpuFreq);
- status.getResourceStatus().setNumProcessors(numCpu);
- status.getResourceStatus().setCpuUsage(cpuUsage);
- }
- //add node health information
-
- TaskTrackerHealthStatus healthStatus = status.getHealthStatus();
- synchronized (this) {
- if (healthChecker != null) {
- healthChecker.setHealthStatus(healthStatus);
- } else {
- healthStatus.setNodeHealthy(true);
- healthStatus.setLastReported(0L);
- healthStatus.setHealthReport("");
- }
- }
- //
- // Xmit the heartbeat
- //
- HeartbeatResponse heartbeatResponse = jobClient.heartbeat(status,
- justStarted,
- justInited,
- askForNewTask,
- heartbeatResponseId);
-
- //
- // The heartbeat got through successfully!
- //
- heartbeatResponseId = heartbeatResponse.getResponseId();
-
- synchronized (this) {
- for (TaskStatus taskStatus : status.getTaskReports()) {
- if (taskStatus.getRunState() != TaskStatus.State.RUNNING &&
- taskStatus.getRunState() != TaskStatus.State.UNASSIGNED &&
- taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING &&
- !taskStatus.inTaskCleanupPhase()) {
- if (taskStatus.getIsMap()) {
- mapTotal--;
- } else {
- reduceTotal--;
- }
- try {
- myInstrumentation.completeTask(taskStatus.getTaskID());
- } catch (MetricsException me) {
- LOG.warn("Caught: " + StringUtils.stringifyException(me));
- }
- runningTasks.remove(taskStatus.getTaskID());
- }
- }
-
- // Clear transient status information which should only
- // be sent once to the JobTracker
- for (TaskInProgress tip: runningTasks.values()) {
- tip.getStatus().clearStatus();
- }
- }
-
- // Force a rebuild of 'status' on the next iteration
- status = null;
-
- return heartbeatResponse;
- }
-
- /**
- * Return the total virtual memory available on this TaskTracker.
- * @return total size of virtual memory.
- */
- long getTotalVirtualMemoryOnTT() {
- return totalVirtualMemoryOnTT;
- }
-
- /**
- * Return the total physical memory available on this TaskTracker.
- * @return total size of physical memory.
- */
- long getTotalPhysicalMemoryOnTT() {
- return totalPhysicalMemoryOnTT;
- }
-
- /**
- * Return the free virtual memory available on this TaskTracker.
- * @return total size of free virtual memory.
- */
- long getAvailableVirtualMemoryOnTT() {
- long availableVirtualMemoryOnTT = TaskTrackerStatus.UNAVAILABLE;
- if (resourceCalculatorPlugin != null) {
- availableVirtualMemoryOnTT =
- resourceCalculatorPlugin.getAvailableVirtualMemorySize();
- }
- return availableVirtualMemoryOnTT;
- }
-
- /**
- * Return the free physical memory available on this TaskTracker.
- * @return total size of free physical memory in bytes
- */
- long getAvailablePhysicalMemoryOnTT() {
- long availablePhysicalMemoryOnTT = TaskTrackerStatus.UNAVAILABLE;
- if (resourceCalculatorPlugin != null) {
- availablePhysicalMemoryOnTT =
- resourceCalculatorPlugin.getAvailablePhysicalMemorySize();
- }
- return availablePhysicalMemoryOnTT;
- }
-
- /**
-   * Return the cumulative CPU time used on this TaskTracker since the system
-   * was started.
-   * @return cumulative CPU time in milliseconds
- */
- long getCumulativeCpuTimeOnTT() {
- long cumulativeCpuTime = TaskTrackerStatus.UNAVAILABLE;
- if (resourceCalculatorPlugin != null) {
- cumulativeCpuTime = resourceCalculatorPlugin.getCumulativeCpuTime();
- }
- return cumulativeCpuTime;
- }
-
- /**
-   * Return the number of processors on this TaskTracker
- * @return number of processors
- */
- int getNumProcessorsOnTT() {
- int numProcessors = TaskTrackerStatus.UNAVAILABLE;
- if (resourceCalculatorPlugin != null) {
- numProcessors = resourceCalculatorPlugin.getNumProcessors();
- }
- return numProcessors;
- }
-
- /**
- * Return the CPU frequency of this TaskTracker
- * @return CPU frequency in kHz
- */
- long getCpuFrequencyOnTT() {
- long cpuFrequency = TaskTrackerStatus.UNAVAILABLE;
- if (resourceCalculatorPlugin != null) {
- cpuFrequency = resourceCalculatorPlugin.getCpuFrequency();
- }
- return cpuFrequency;
- }
-
- /**
-   * Return the CPU usage of this TaskTracker in percent
- * @return CPU usage in %
- */
- float getCpuUsageOnTT() {
- float cpuUsage = TaskTrackerStatus.UNAVAILABLE;
- if (resourceCalculatorPlugin != null) {
- cpuUsage = resourceCalculatorPlugin.getCpuUsage();
- }
- return cpuUsage;
- }
-
- long getTotalMemoryAllottedForTasksOnTT() {
- return totalMemoryAllottedForTasks;
- }
-
- /**
- * @return The amount of physical memory that will not be used for running
- * tasks in bytes. Returns JobConf.DISABLED_MEMORY_LIMIT if it is not
- * configured.
- */
- long getReservedPhysicalMemoryOnTT() {
- return reservedPhysicalMemoryOnTT;
- }
-
- /**
- * Check if the jobtracker directed a 'reset' of the tasktracker.
- *
- * @param actions the directives of the jobtracker for the tasktracker.
- * @return <code>true</code> if tasktracker is to be reset,
- * <code>false</code> otherwise.
- */
- private boolean reinitTaskTracker(TaskTrackerAction[] actions) {
- if (actions != null) {
- for (TaskTrackerAction action : actions) {
- if (action.getActionId() ==
- TaskTrackerAction.ActionType.REINIT_TRACKER) {
- LOG.info("Received ReinitTrackerAction from JobTracker");
- return true;
- }
- }
- }
- return false;
- }
-
- /**
-   * Kill any tasks that have not reported progress within their
-   * per-job configured timeout.
- */
- private synchronized void markUnresponsiveTasks() throws IOException {
- long now = System.currentTimeMillis();
- for (TaskInProgress tip: runningTasks.values()) {
- if (tip.getRunState() == TaskStatus.State.RUNNING ||
- tip.getRunState() == TaskStatus.State.COMMIT_PENDING ||
- tip.isCleaningup()) {
- // Check the per-job timeout interval for tasks;
- // an interval of '0' implies it is never timed-out
- long jobTaskTimeout = tip.getTaskTimeout();
- if (jobTaskTimeout == 0) {
- continue;
- }
-
- // Check if the task has not reported progress for a
- // time-period greater than the configured time-out
- long timeSinceLastReport = now - tip.getLastProgressReport();
- if (timeSinceLastReport > jobTaskTimeout && !tip.wasKilled) {
- String msg =
- "Task " + tip.getTask().getTaskID() + " failed to report status for "
- + (timeSinceLastReport / 1000) + " seconds. Killing!";
- LOG.info(tip.getTask().getTaskID() + ": " + msg);
- ReflectionUtils.logThreadInfo(LOG, "lost task", 30);
- tip.reportDiagnosticInfo(msg);
- myInstrumentation.timedoutTask(tip.getTask().getTaskID());
- dumpTaskStack(tip);
- purgeTask(tip, true);
- }
- }
- }
- }
-
- /**
-   * Builds a list of PathDeletionContext objects for the given paths
- */
- private static PathDeletionContext[] buildPathDeletionContexts(FileSystem fs,
- Path[] paths) {
- int i = 0;
- PathDeletionContext[] contexts = new PathDeletionContext[paths.length];
-
- for (Path p : paths) {
- contexts[i++] = new PathDeletionContext(fs, p.toUri().getPath());
- }
- return contexts;
- }
-
- /**
-   * Builds a list of {@link TaskControllerJobPathDeletionContext} objects for
-   * a job, each pointing to the job's jobLocalDir.
-   * @param fs : FileSystem in which the dirs are to be deleted
-   * @param paths : mapred-local-dirs
-   * @param id : {@link JobID} of the job for which the local-dir needs to
-   *          be cleaned up.
-   * @param user : Job owner's username
-   * @param taskController : the task-controller to be used for deletion of
-   *          jobLocalDir
- */
- static PathDeletionContext[] buildTaskControllerJobPathDeletionContexts(
- FileSystem fs, Path[] paths, JobID id, String user,
- TaskController taskController)
- throws IOException {
- int i = 0;
- PathDeletionContext[] contexts =
- new TaskControllerPathDeletionContext[paths.length];
-
- for (Path p : paths) {
- contexts[i++] = new TaskControllerJobPathDeletionContext(fs, p, id, user,
- taskController);
- }
- return contexts;
- }
-
- /**
-   * Builds a list of TaskControllerTaskPathDeletionContext objects for a task
-   * @param fs : FileSystem in which the dirs are to be deleted
-   * @param paths : mapred-local-dirs
-   * @param task : the task whose taskDir or taskWorkDir is going to be deleted
-   * @param isWorkDir : whether the dir to be deleted is the workDir or the
-   *          taskDir
-   * @param taskController : the task-controller to be used for deletion of
-   *          taskDir or taskWorkDir
- */
- static PathDeletionContext[] buildTaskControllerTaskPathDeletionContexts(
- FileSystem fs, Path[] paths, Task task, boolean isWorkDir,
- TaskController taskController)
- throws IOException {
- int i = 0;
- PathDeletionContext[] contexts =
- new TaskControllerPathDeletionContext[paths.length];
-
- for (Path p : paths) {
- contexts[i++] = new TaskControllerTaskPathDeletionContext(fs, p, task,
- isWorkDir, taskController);
- }
- return contexts;
- }
-
- /**
- * Send a signal to a stuck task commanding it to dump stack traces
- * to stderr before we kill it with purgeTask().
- *
- * @param tip {@link TaskInProgress} to dump stack traces.
- */
- private void dumpTaskStack(TaskInProgress tip) {
- TaskRunner runner = tip.getTaskRunner();
- if (null == runner) {
- return; // tip is already abandoned.
- }
-
- JvmManager jvmMgr = runner.getJvmManager();
- jvmMgr.dumpStack(runner);
- }
-
- /**
- * The task tracker is done with this job, so we need to clean up.
- * @param action The action with the job
- * @throws IOException
- */
- synchronized void purgeJob(KillJobAction action) throws IOException {
- JobID jobId = action.getJobID();
- LOG.info("Received 'KillJobAction' for job: " + jobId);
- RunningJob rjob = null;
- synchronized (runningJobs) {
- rjob = runningJobs.get(jobId);
- }
-
- if (rjob == null) {
- LOG.warn("Unknown job " + jobId + " being deleted.");
- } else {
- synchronized (rjob) {
-        // Add the tips of this job to the queue of tasks to be purged
- for (TaskInProgress tip : rjob.tasks) {
- tip.jobHasFinished(false);
- Task t = tip.getTask();
- if (t.isMapTask()) {
- indexCache.removeMap(tip.getTask().getTaskID().toString());
- }
- }
-        // Delete the job directory if the job is done/failed
-        // and its files need not be kept
- if (!rjob.keepJobFiles) {
- removeJobFiles(rjob.jobConf.getUser(), rjob.getJobID());
- }
- // add job to taskLogCleanupThread
- long now = System.currentTimeMillis();
- taskLogCleanupThread.markJobLogsForDeletion(now, rjob.jobConf,
- rjob.jobid);
-
- // Remove this job
- rjob.tasks.clear();
- // Close all FileSystems for this job
- try {
- FileSystem.closeAllForUGI(rjob.getUGI());
- } catch (IOException ie) {
- LOG.warn("Ignoring exception " + StringUtils.stringifyException(ie) +
- " while closing FileSystem for " + rjob.getUGI());
- }
- }
- }
-
-    synchronized (runningJobs) {
- runningJobs.remove(jobId);
- }
- getJobTokenSecretManager().removeTokenForJob(jobId.toString());
- }
-
- /**
-   * This job's files are no longer needed on this TT; remove them.
-   *
-   * @param user the job owner's user name
-   * @param jobId the job whose local files are to be removed
- * @throws IOException
- */
- void removeJobFiles(String user, JobID jobId)
- throws IOException {
- PathDeletionContext[] contexts =
- buildTaskControllerJobPathDeletionContexts(localFs,
- getLocalFiles(fConf, ""), jobId, user, taskController);
- directoryCleanupThread.addToQueue(contexts);
- }
-
- /**
- * Remove the tip and update all relevant state.
- *
- * @param tip {@link TaskInProgress} to be removed.
- * @param wasFailure did the task fail or was it killed?
- */
- private void purgeTask(TaskInProgress tip, boolean wasFailure)
- throws IOException {
- if (tip != null) {
- LOG.info("About to purge task: " + tip.getTask().getTaskID());
-
- // Remove the task from running jobs,
- // removing the job if it's the last task
- removeTaskFromJob(tip.getTask().getJobID(), tip);
- tip.jobHasFinished(wasFailure);
- if (tip.getTask().isMapTask()) {
- indexCache.removeMap(tip.getTask().getTaskID().toString());
- }
- }
- }
-
-  /** Check if we're dangerously low on disk space.
-   * If so, kill tasks to free up space and make sure
-   * we don't accept any new tasks.
-   * Try killing reduce tasks first, since they typically
-   * use up the most space, then pick the one with the least progress.
- */
-  private void killOverflowingTasks() throws IOException {
-    long localMinSpaceKill;
-    synchronized (this) {
-      localMinSpaceKill = minSpaceKill;
-    }
-    if (!enoughFreeSpace(localMinSpaceKill)) {
-      acceptNewTasks = false;
-      //we give up! do not accept new tasks until
-      //all the ones running have finished and they're all cleared up
-      synchronized (this) {
-        TaskInProgress killMe = findTaskToKill(null);
-
-        if (killMe != null) {
-          String msg = "Tasktracker running out of space." +
-            " Killing task.";
-          LOG.info(killMe.getTask().getTaskID() + ": " + msg);
-          killMe.reportDiagnosticInfo(msg);
-          purgeTask(killMe, false);
-        }
-      }
-    }
-  }
-
- /**
- * Pick a task to kill to free up memory/disk-space
- * @param tasksToExclude tasks that are to be excluded while trying to find a
- * task to kill. If null, all runningTasks will be searched.
- * @return the task to kill or null, if one wasn't found
- */
- synchronized TaskInProgress findTaskToKill(List<TaskAttemptID> tasksToExclude) {
- TaskInProgress killMe = null;
-    for (TaskInProgress tip : runningTasks.values()) {
-
- if (tasksToExclude != null
- && tasksToExclude.contains(tip.getTask().getTaskID())) {
- // exclude this task
- continue;
- }
-
- if ((tip.getRunState() == TaskStatus.State.RUNNING ||
- tip.getRunState() == TaskStatus.State.COMMIT_PENDING) &&
- !tip.wasKilled) {
-
- if (killMe == null) {
- killMe = tip;
-
- } else if (!tip.getTask().isMapTask()) {
- //reduce task, give priority
- if (killMe.getTask().isMapTask() ||
- (tip.getTask().getProgress().get() <
- killMe.getTask().getProgress().get())) {
-
- killMe = tip;
- }
-
- } else if (killMe.getTask().isMapTask() &&
- tip.getTask().getProgress().get() <
- killMe.getTask().getProgress().get()) {
- //map task, only add if the progress is lower
-
- killMe = tip;
- }
- }
- }
- return killMe;
- }
-
- /**
-   * Check if any of the local directories has enough
-   * free space (more than minSpace).
-   *
-   * If not, do not try to get a new task assigned.
-   * @return true if at least one local directory has more than minSpace free
-   * @throws IOException
- */
- private boolean enoughFreeSpace(long minSpace) throws IOException {
- if (minSpace == 0) {
- return true;
- }
- return minSpace < getFreeSpace();
- }
-
- private long getFreeSpace() throws IOException {
- long biggestSeenSoFar = 0;
- String[] localDirs = fConf.getLocalDirs();
- for (int i = 0; i < localDirs.length; i++) {
- DF df = null;
- if (localDirsDf.containsKey(localDirs[i])) {
- df = localDirsDf.get(localDirs[i]);
- } else {
- df = new DF(new File(localDirs[i]), fConf);
- localDirsDf.put(localDirs[i], df);
- }
-
- long availOnThisVol = df.getAvailable();
- if (availOnThisVol > biggestSeenSoFar) {
- biggestSeenSoFar = availOnThisVol;
- }
- }
-
- //Should ultimately hold back the space we expect running tasks to use but
- //that estimate isn't currently being passed down to the TaskTrackers
- return biggestSeenSoFar;
- }
-
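-  // Launcher threads that hold queued tasks until enough free slots of the
-  // matching type become available.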
- private TaskLauncher mapLauncher;
- private TaskLauncher reduceLauncher;
- public JvmManager getJvmManagerInstance() {
- return jvmManager;
- }
-
- // called from unit test
- void setJvmManagerInstance(JvmManager jvmManager) {
- this.jvmManager = jvmManager;
- }
-
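-  // Dispatch a launch action to the launcher matching the task type.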
- private void addToTaskQueue(LaunchTaskAction action) {
- if (action.getTask().isMapTask()) {
- mapLauncher.addToTaskQueue(action);
- } else {
- reduceLauncher.addToTaskQueue(action);
- }
- }
-
- // This method is called from unit tests
- int getFreeSlots(boolean isMap) {
- if (isMap) {
- return mapLauncher.numFreeSlots.get();
- } else {
- return reduceLauncher.numFreeSlots.get();
- }
- }
-
- class TaskLauncher extends Thread {
- private IntWritable numFreeSlots;
- private final int maxSlots;
- private List<TaskInProgress> tasksToLaunch;
-
- public TaskLauncher(TaskType taskType, int numSlots) {
- this.maxSlots = numSlots;
- this.numFreeSlots = new IntWritable(numSlots);
- this.tasksToLaunch = new LinkedList<TaskInProgress>();
- setDaemon(true);
- setName("TaskLauncher for " + taskType + " tasks");
- }
-
- public void addToTaskQueue(LaunchTaskAction action) {
- synchronized (tasksToLaunch) {
- TaskInProgress tip = registerTask(action, this);
- tasksToLaunch.add(tip);
- tasksToLaunch.notifyAll();
- }
- }
-
- public void cleanTaskQueue() {
- tasksToLaunch.clear();
- }
-
- public void addFreeSlots(int numSlots) {
- synchronized (numFreeSlots) {
- numFreeSlots.set(numFreeSlots.get() + numSlots);
- assert (numFreeSlots.get() <= maxSlots);
- LOG.info("addFreeSlot : current free slots : " + numFreeSlots.get());
- numFreeSlots.notifyAll();
- }
- }
-
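-    // Wake the launcher thread if it is blocked waiting on numFreeSlots,
-    // e.g. so it can re-check a queued task that was killed externally.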
- void notifySlots() {
- synchronized (numFreeSlots) {
- numFreeSlots.notifyAll();
- }
- }
-
- int getNumWaitingTasksToLaunch() {
- synchronized (tasksToLaunch) {
- return tasksToLaunch.size();
- }
- }
-
- public void run() {
- while (!Thread.interrupted()) {
- try {
- TaskInProgress tip;
- Task task;
- synchronized (tasksToLaunch) {
- while (tasksToLaunch.isEmpty()) {
- tasksToLaunch.wait();
- }
- //get the TIP
- tip = tasksToLaunch.remove(0);
- task = tip.getTask();
- LOG.info("Trying to launch : " + tip.getTask().getTaskID() +
- " which needs " + task.getNumSlotsRequired() + " slots");
- }
- //wait for free slots to run
- synchronized (numFreeSlots) {
- boolean canLaunch = true;
- while (numFreeSlots.get() < task.getNumSlotsRequired()) {
- //Make sure that there is no kill task action for this task!
- //We are not locking tip here, because it would reverse the
- //locking order!
- //Also, Lock for the tip is not required here! because :
- // 1. runState of TaskStatus is volatile
- // 2. Any notification is not missed because notification is
- // synchronized on numFreeSlots. So, while we are doing the check,
- // if the tip is half way through the kill(), we don't miss
- // notification for the following wait().
- if (!tip.canBeLaunched()) {
- //got killed externally while still in the launcher queue
- LOG.info("Not blocking slots for " + task.getTaskID()
- + " as it got killed externally. Task's state is "
- + tip.getRunState());
- canLaunch = false;
- break;
- }
- LOG.info("TaskLauncher : Waiting for " + task.getNumSlotsRequired() +
- " to launch " + task.getTaskID() + ", currently we have " +
- numFreeSlots.get() + " free slots");
- numFreeSlots.wait();
- }
- if (!canLaunch) {
- continue;
- }
- LOG.info("In TaskLauncher, current free slots : " + numFreeSlots.get()+
- " and trying to launch "+tip.getTask().getTaskID() +
- " which needs " + task.getNumSlotsRequired() + " slots");
- numFreeSlots.set(numFreeSlots.get() - task.getNumSlotsRequired());
- assert (numFreeSlots.get() >= 0);
- }
- synchronized (tip) {
- //to make sure that there is no kill task action for this
- if (!tip.canBeLaunched()) {
- //got killed externally while still in the launcher queue
- LOG.info("Not launching task " + task.getTaskID() + " as it got"
- + " killed externally. Task's state is " + tip.getRunState());
- addFreeSlots(task.getNumSlotsRequired());
- continue;
- }
- tip.slotTaken = true;
- }
- //got a free slot. launch the task
- startNewTask(tip);
- } catch (InterruptedException e) {
- return; // ALL DONE
- } catch (Throwable th) {
- LOG.error("TaskLauncher error " +
- StringUtils.stringifyException(th));
- }
- }
- }
- }
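-
-  // Create and record a TaskInProgress for a newly assigned task, updating
-  // the task maps and the per-type task counters under the TaskTracker lock.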
- private TaskInProgress registerTask(LaunchTaskAction action,
- TaskLauncher launcher) {
- Task t = action.getTask();
- LOG.info("LaunchTaskAction (registerTask): " + t.getTaskID() +
- " task's state:" + t.getState());
- TaskInProgress tip = new TaskInProgress(t, this.fConf, launcher);
- synchronized (this) {
- tasks.put(t.getTaskID(), tip);
- runningTasks.put(t.getTaskID(), tip);
- boolean isMap = t.isMapTask();
- if (isMap) {
- mapTotal++;
- } else {
- reduceTotal++;
- }
- }
- return tip;
- }
- /**
- * Start a new task.
- * All exceptions are handled locally, so that we don't mess up the
- * task tracker.
- */
- void startNewTask(final TaskInProgress tip) {
- Thread launchThread = new Thread(new Runnable() {
- @Override
- public void run() {
- try {
- RunningJob rjob = localizeJob(tip);
- // Localization is done. Neither rjob.jobConf nor rjob.ugi can be null
- launchTaskForJob(tip, new JobConf(rjob.getJobConf()), rjob.ugi);
- } catch (Throwable e) {
- String msg = ("Error initializing " + tip.getTask().getTaskID() +
- ":\n" + StringUtils.stringifyException(e));
- LOG.warn(msg);
- tip.reportDiagnosticInfo(msg);
- try {
- tip.kill(true);
- tip.cleanup(true);
- } catch (IOException ie2) {
- LOG.info("Error cleaning up " + tip.getTask().getTaskID() + ":\n" +
- StringUtils.stringifyException(ie2));
- }
- if (e instanceof Error) {
- LOG.error("TaskLauncher error " +
- StringUtils.stringifyException(e));
- }
- }
- }
- });
- launchThread.start();
-  }
-
- void addToMemoryManager(TaskAttemptID attemptId, boolean isMap,
- JobConf conf) {
- if (!isTaskMemoryManagerEnabled()) {
- return; // Skip this if TaskMemoryManager is not enabled.
- }
- // Obtain physical memory limits from the job configuration
- long physicalMemoryLimit =
- conf.getLong(isMap ? MRJobConfig.MAP_MEMORY_PHYSICAL_MB :
- MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB,
- JobConf.DISABLED_MEMORY_LIMIT);
- if (physicalMemoryLimit > 0) {
- physicalMemoryLimit *= 1024L * 1024L;
- }
-
- // Obtain virtual memory limits from the job configuration
- long virtualMemoryLimit = isMap ?
- conf.getMemoryForMapTask() * 1024 * 1024 :
- conf.getMemoryForReduceTask() * 1024 * 1024;
-
- taskMemoryManager.addTask(attemptId, virtualMemoryLimit,
- physicalMemoryLimit);
- }
-
- void removeFromMemoryManager(TaskAttemptID attemptId) {
- // Remove the entry from taskMemoryManagerThread's data structures.
- if (isTaskMemoryManagerEnabled()) {
- taskMemoryManager.removeTask(attemptId);
- }
- }
-
- /**
- * Notify the tasktracker to send an out-of-band heartbeat.
- */
- private void notifyTTAboutTaskCompletion() {
- if (oobHeartbeatOnTaskCompletion) {
- synchronized (finishedCount) {
- int value = finishedCount.get();
- finishedCount.set(value+1);
- finishedCount.notify();
- }
- }
- }
-
- /**
- * The server retry loop.
- * This while-loop attempts to connect to the JobTracker. It only
- * loops when the old TaskTracker has gone bad (its state is
- * stale somehow) and we need to reinitialize everything.
- */
- public void run() {
- try {
- startCleanupThreads();
- boolean denied = false;
- while (running && !shuttingDown && !denied) {
- boolean staleState = false;
- try {
- // This while-loop attempts reconnects if we get network errors
- while (running && !staleState && !shuttingDown && !denied) {
- try {
- State osState = offerService();
- if (osState == State.STALE) {
- staleState = true;
- } else if (osState == State.DENIED) {
- denied = true;
- }
- } catch (Exception ex) {
- if (!shuttingDown) {
- LOG.info("Lost connection to JobTracker [" +
- jobTrackAddr + "]. Retrying...", ex);
- try {
- Thread.sleep(5000);
- } catch (InterruptedException ie) {
- }
- }
- }
- }
- } finally {
- close();
- }
- if (shuttingDown) { return; }
- LOG.warn("Reinitializing local state");
- initialize();
- }
- if (denied) {
- shutdown();
- }
- } catch (IOException iex) {
- LOG.error("Got fatal exception while reinitializing TaskTracker: " +
- StringUtils.stringifyException(iex));
- return;
-    } catch (InterruptedException i) {
- LOG.error("Got interrupted while reinitializing TaskTracker: " +
- i.getMessage());
- return;
- }
- }
-
- ///////////////////////////////////////////////////////
- // TaskInProgress maintains all the info for a Task that
- // lives at this TaskTracker. It maintains the Task object,
- // its TaskStatus, and the TaskRunner.
- ///////////////////////////////////////////////////////
- class TaskInProgress {
- Task task;
- long lastProgressReport;
- StringBuffer diagnosticInfo = new StringBuffer();
- private TaskRunner runner;
- volatile boolean done = false;
- volatile boolean wasKilled = false;
- private JobConf defaultJobConf;
- private JobConf localJobConf;
- private boolean keepFailedTaskFiles;
- private boolean alwaysKeepTaskFiles;
- private TaskStatus taskStatus;
- private long taskTimeout;
- private String debugCommand;
- private volatile boolean slotTaken = false;
- private TaskLauncher launcher;
-
-    // The ugi of the user who is running the job. It also holds all of the
-    // job's tokens, which are populated during job-localization
- private UserGroupInformation ugi;
-
- UserGroupInformation getUGI() {
- return ugi;
- }
-
- void setUGI(UserGroupInformation userUGI) {
- ugi = userUGI;
- }
-
- /**
- */
- public TaskInProgress(Task task, JobConf conf) {
- this(task, conf, null);
- }
-
- public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) {
- this.task = task;
- this.launcher = launcher;
- this.lastProgressReport = System.currentTimeMillis();
- this.defaultJobConf = conf;
- localJobConf = null;
- taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(),
- 0.0f,
- task.getNumSlotsRequired(),
- task.getState(),
- diagnosticInfo.toString(),
- "initializing",
- getName(),
- task.isTaskCleanupTask() ?
- TaskStatus.Phase.CLEANUP :
- task.isMapTask()? TaskStatus.Phase.MAP:
- TaskStatus.Phase.SHUFFLE,
- task.getCounters());
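-      // Default progress-report timeout of 10 minutes; setJobConf() may
-      // override it with the job's MRJobConfig.TASK_TIMEOUT value.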
- taskTimeout = (10 * 60 * 1000);
- }
-
- void localizeTask(Task task) throws IOException{
-
- FileSystem localFs = FileSystem.getLocal(fConf);
-
- // create taskDirs on all the disks.
- getLocalizer().initializeAttemptDirs(task.getUser(),
- task.getJobID().toString(), task.getTaskID().toString(),
- task.isTaskCleanupTask());
-
- // create the working-directory of the task
- Path cwd =
- lDirAlloc.getLocalPathForWrite(getTaskWorkDir(task.getUser(), task
- .getJobID().toString(), task.getTaskID().toString(), task
- .isTaskCleanupTask()), defaultJobConf);
- if (!localFs.mkdirs(cwd)) {
- throw new IOException("Mkdirs failed to create "
- + cwd.toString());
- }
-
- localJobConf.set(LOCAL_DIR,
- fConf.get(LOCAL_DIR));
-
- if (fConf.get(TT_HOST_NAME) != null) {
- localJobConf.set(TT_HOST_NAME, fConf.get(TT_HOST_NAME));
- }
-
- keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles();
-
- // Do the task-type specific localization
- task.localizeConfiguration(localJobConf);
-
- List<String[]> staticResolutions = NetUtils.getAllStaticResolutions();
- if (staticResolutions != null && staticResolutions.size() > 0) {
- StringBuffer str = new StringBuffer();
-
- for (int i = 0; i < staticResolutions.size(); i++) {
- String[] hostToResolved = staticResolutions.get(i);
- str.append(hostToResolved[0]+"="+hostToResolved[1]);
- if (i != staticResolutions.size() - 1) {
- str.append(',');
- }
- }
- localJobConf.set(TT_STATIC_RESOLUTIONS, str.toString());
- }
- if (task.isMapTask()) {
- debugCommand = localJobConf.getMapDebugScript();
- } else {
- debugCommand = localJobConf.getReduceDebugScript();
- }
- String keepPattern = localJobConf.getKeepTaskFilesPattern();
- if (keepPattern != null) {
- alwaysKeepTaskFiles =
- Pattern.matches(keepPattern, task.getTaskID().toString());
- } else {
- alwaysKeepTaskFiles = false;
- }
- if (debugCommand != null || localJobConf.getProfileEnabled() ||
- alwaysKeepTaskFiles || keepFailedTaskFiles) {
- //disable jvm reuse
- localJobConf.setNumTasksToExecutePerJvm(1);
- }
- task.setConf(localJobConf);
- }
-
- /**
- */
- public Task getTask() {
- return task;
- }
-
- TaskRunner getTaskRunner() {
- return runner;
- }
-
- void setTaskRunner(TaskRunner rnr) {
- this.runner = rnr;
- }
-
- public synchronized void setJobConf(JobConf lconf){
- this.localJobConf = lconf;
- keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles();
- taskTimeout = localJobConf.getLong(MRJobConfig.TASK_TIMEOUT,
- 10 * 60 * 1000);
- }
-
- public synchronized JobConf getJobConf() {
- return localJobConf;
- }
-
- /**
- */
- public synchronized TaskStatus getStatus() {
- taskStatus.setDiagnosticInfo(diagnosticInfo.toString());
- if (diagnosticInfo.length() > 0) {
- diagnosticInfo = new StringBuffer();
- }
-
- return taskStatus;
- }
-
- /**
- * Kick off the task execution
- */
- public synchronized void launchTask() throws IOException {
- if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED ||
- this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
- this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) {
- localizeTask(task);
- if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) {
- this.taskStatus.setRunState(TaskStatus.State.RUNNING);
- }
- setTaskRunner(task.createRunner(TaskTracker.this, this));
- this.runner.start();
- this.taskStatus.setStartTime(System.currentTimeMillis());
- } else {
- LOG.info("Not launching task: " + task.getTaskID() +
- " since it's state is " + this.taskStatus.getRunState());
- }
- }
-
- boolean isCleaningup() {
- return this.taskStatus.inTaskCleanupPhase();
- }
-
- // checks if state has been changed for the task to be launched
- boolean canBeLaunched() {
- return (getRunState() == TaskStatus.State.UNASSIGNED ||
- getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
- getRunState() == TaskStatus.State.KILLED_UNCLEAN);
- }
-
- /**
- * The task is reporting its progress
- */
-    public synchronized void reportProgress(TaskStatus taskStatus) {
- LOG.info(task.getTaskID() + " " + taskStatus.getProgress() +
- "% " + taskStatus.getStateString());
- // task will report its state as
- // COMMIT_PENDING when it is waiting for commit response and
- // when it is committing.
- // cleanup attempt will report its state as FAILED_UNCLEAN/KILLED_UNCLEAN
- if (this.done ||
- (this.taskStatus.getRunState() != TaskStatus.State.RUNNING &&
- this.taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING &&
- !isCleaningup()) ||
- ((this.taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING ||
- this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
- this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) &&
- taskStatus.getRunState() == TaskStatus.State.RUNNING)) {
- //make sure we ignore progress messages after a task has
- //invoked TaskUmbilicalProtocol.done() or if the task has been
- //KILLED/FAILED/FAILED_UNCLEAN/KILLED_UNCLEAN
-        //Also ignore progress update if the state change is from
-        //COMMIT_PENDING/FAILED_UNCLEAN/KILLED_UNCLEAN to RUNNING
- LOG.info(task.getTaskID() + " Ignoring status-update since " +
- ((this.done) ? "task is 'done'" :
- ("runState: " + this.taskStatus.getRunState()))
- );
- return;
- }
-
- this.taskStatus.statusUpdate(taskStatus);
- this.lastProgressReport = System.currentTimeMillis();
- }
-
- /**
- */
- public long getLastProgressReport() {
- return lastProgressReport;
- }
-
- /**
- */
- public TaskStatus.State getRunState() {
- return taskStatus.getRunState();
- }
-
- /**
- * The task's configured timeout.
- *
- * @return the task's configured timeout.
- */
- public long getTaskTimeout() {
- return taskTimeout;
- }
-
- /**
- * The task has reported some diagnostic info about its status
- */
- public synchronized void reportDiagnosticInfo(String info) {
- this.diagnosticInfo.append(info);
- }
-
- public synchronized void reportNextRecordRange(SortedRanges.Range range) {
- this.taskStatus.setNextRecordRange(range);
- }
-
- /**
- * The task is reporting that it's done running
- */
- public synchronized void reportDone() {
- if (isCleaningup()) {
- if (this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) {
- this.taskStatus.setRunState(TaskStatus.State.FAILED);
- } else if (this.taskStatus.getRunState() ==
- TaskStatus.State.KILLED_UNCLEAN) {
- this.taskStatus.setRunState(TaskStatus.State.KILLED);
- }
- } else {
- this.taskStatus.setRunState(TaskStatus.State.SUCCEEDED);
- }
- this.taskStatus.setProgress(1.0f);
- this.taskStatus.setFinishTime(System.currentTimeMillis());
- this.done = true;
- jvmManager.taskFinished(runner);
- runner.signalDone();
- LOG.info("Task " + task.getTaskID() + " is done.");
- LOG.info("reported output size for " + task.getTaskID() + " was " + taskStatus.getOutputSize());
- myInstrumentation.statusUpdate(task, taskStatus);
- }
-
- public boolean wasKilled() {
- return wasKilled;
- }
-
- /**
- * A task is reporting in as 'done'.
- *
- * We need to notify the tasktracker to send an out-of-band heartbeat.
-     * If it isn't <code>commitPending</code>, we need to finalize the task
-     * and release the slot it's occupying.
- *
- * @param commitPending is the task-commit pending?
- */
- void reportTaskFinished(boolean commitPending) {
- if (!commitPending) {
- try {
- taskFinished();
- } finally {
- releaseSlot();
- }
- }
- notifyTTAboutTaskCompletion();
- }
-
- /* State changes:
- * RUNNING/COMMIT_PENDING -> FAILED_UNCLEAN/FAILED/KILLED_UNCLEAN/KILLED
- * FAILED_UNCLEAN -> FAILED
- * KILLED_UNCLEAN -> KILLED
- */
- private void setTaskFailState(boolean wasFailure) {
- // go FAILED_UNCLEAN -> FAILED and KILLED_UNCLEAN -> KILLED always
- if (taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) {
- taskStatus.setRunState(TaskStatus.State.FAILED);
- } else if (taskStatus.getRunState() ==
- TaskStatus.State.KILLED_UNCLEAN) {
- taskStatus.setRunState(TaskStatus.State.KILLED);
- } else if (task.isMapOrReduce() &&
- taskStatus.getPhase() != TaskStatus.Phase.CLEANUP) {
- if (wasFailure) {
- taskStatus.setRunState(TaskStatus.State.FAILED_UNCLEAN);
- } else {
- taskStatus.setRunState(TaskStatus.State.KILLED_UNCLEAN);
- }
- } else {
- if (wasFailure) {
- taskStatus.setRunState(TaskStatus.State.FAILED);
- } else {
- taskStatus.setRunState(TaskStatus.State.KILLED);
- }
- }
- }
-
- /**
- * The task has actually finished running.
- */
- public void taskFinished() {
- long start = System.currentTimeMillis();
-
- //
- // Wait until task reports as done. If it hasn't reported in,
- // wait for a second and try again.
- //
- while (!done && (System.currentTimeMillis() - start < WAIT_FOR_DONE)) {
- try {
- Thread.sleep(1000);
- } catch (InterruptedException ie) {
- }
- }
-
- //
- // Change state to success or failure, depending on whether
- // task was 'done' before terminating
- //
- boolean needCleanup = false;
- synchronized (this) {
- // Remove the task from MemoryManager, if the task SUCCEEDED or FAILED.
- // KILLED tasks are removed in method kill(), because Kill
- // would result in launching a cleanup attempt before
- // TaskRunner returns; if remove happens here, it would remove
- // wrong task from memory manager.
- if (done || !wasKilled) {
- removeFromMemoryManager(task.getTaskID());
- }
- if (!done) {
- if (!wasKilled) {
- failures += 1;
- setTaskFailState(true);
- // call the script here for the failed tasks.
- if (debugCommand != null) {
- try {
- runDebugScript();
- } catch (Exception e) {
- String msg =
- "Debug-script could not be run successfully : "
- + StringUtils.stringifyException(e);
- LOG.warn(msg);
- reportDiagnosticInfo(msg);
- }
- }
- }
- taskStatus.setProgress(0.0f);
- }
- this.taskStatus.setFinishTime(System.currentTimeMillis());
- needCleanup = (taskStatus.getRunState() == TaskStatus.State.FAILED ||
- taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
- taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN ||
- taskStatus.getRunState() == TaskStatus.State.KILLED);
- }
-
- //
- // If the task has failed, or if the task was killAndCleanup()'ed,
- // we should clean up right away. We only wait to cleanup
- // if the task succeeded, and its results might be useful
- // later on to downstream job processing.
- //
- if (needCleanup) {
- removeTaskFromJob(task.getJobID(), this);
- }
-
- cleanup(needCleanup);
- }
-
- /**
- * Run the debug-script now. Because debug-script can be user code, we use
- * {@link TaskController} to execute the debug script.
- *
- * @throws IOException
- */
- private void runDebugScript() throws IOException {
- String taskStdout ="";
- String taskStderr ="";
- String taskSyslog ="";
- String jobConf = task.getJobFile();
- try {
- // get task's stdout file
- taskStdout = FileUtil
- .makeShellPath(TaskLog.getRealTaskLogFileLocation(task.getTaskID(),
- task.isTaskCleanupTask(), TaskLog.LogName.STDOUT));
- // get task's stderr file
- taskStderr = FileUtil
- .makeShellPath(TaskLog.getRealTaskLogFileLocation(task.getTaskID(),
- task.isTaskCleanupTask(), TaskLog.LogName.STDERR));
- // get task's syslog file
- taskSyslog = FileUtil
- .makeShellPath(TaskLog.getRealTaskLogFileLocation(task.getTaskID(),
- task.isTaskCleanupTask(), TaskLog.LogName.SYSLOG));
-      } catch (Exception e) {
- LOG.warn("Exception finding task's stdout/err/syslog files", e);
- }
- File workDir = new File(lDirAlloc.getLocalPathToRead(
- TaskTracker.getLocalTaskDir(task.getUser(), task.getJobID()
- .toString(), task.getTaskID().toString(), task
- .isTaskCleanupTask())
- + Path.SEPARATOR + MRConstants.WORKDIR, localJobConf).toString());
- // Build the command
- File stdout = TaskLog.getTaskLogFile(task.getTaskID(), task
- .isTaskCleanupTask(), TaskLog.LogName.DEBUGOUT);
- // add pipes program as argument if it exists.
- String program ="";
- String executable = Submitter.getExecutable(localJobConf);
- if ( executable != null) {
- try {
- program = new URI(executable).getFragment();
- } catch (URISyntaxException ur) {
- LOG.warn("Problem in the URI fragment for pipes executable");
- }
- }
-      String[] debug = debugCommand.split(" ");
- List<String> vargs = new ArrayList<String>();
- for (String component : debug) {
- vargs.add(component);
- }
- vargs.add(taskStdout);
- vargs.add(taskStderr);
- vargs.add(taskSyslog);
- vargs.add(jobConf);
- vargs.add(program);
- DebugScriptContext context =
- new TaskController.DebugScriptContext();
- context.args = vargs;
- context.stdout = stdout;
- context.workDir = workDir;
- context.task = task;
- getTaskController().runDebugScript(context);
- // add the lines of debug out to diagnostics
- int num = localJobConf.getInt(MRJobConfig.TASK_DEBUGOUT_LINES, -1);
- addDiagnostics(FileUtil.makeShellPath(stdout), num, "DEBUG OUT");
- }
-
- /**
-     * Add the last 'num' lines of the given file to the diagnostics.
-     * If num is -1, all lines of the file are added to the diagnostics.
- * @param file The file from which to collect diagnostics.
- * @param num The number of lines to be sent to diagnostics.
- * @param tag The tag is printed before the diagnostics are printed.
- */
- public void addDiagnostics(String file, int num, String tag) {
- RandomAccessFile rafile = null;
- try {
- rafile = new RandomAccessFile(file, "r");
- int no_lines = 0;
- String line = null;
- StringBuffer tail = new StringBuffer();
- tail.append("\n-------------------- " + tag + " ---------------------\n");
- String[] lines = null;
- if (num > 0) {
- lines = new String[num];
- }
- while ((line = rafile.readLine()) != null) {
- no_lines++;
- if (num > 0) {
- if (no_lines <= num) {
- lines[no_lines - 1] = line;
- } else { // shift them up
- for (int i = 0; i < num - 1; ++i) {
- lines[i] = lines[i + 1];
- }
- lines[num - 1] = line;
- }
- } else if (num == -1) {
- tail.append(line);
- tail.append("\n");
- }
- }
- int n = no_lines > num ? num : no_lines;
- if (num > 0) {
- for (int i = 0; i < n; i++) {
- tail.append(lines[i]);
- tail.append("\n");
- }
- }
- if (n != 0) {
- reportDiagnosticInfo(tail.toString());
- }
- } catch (FileNotFoundException fnfe) {
- LOG.warn("File " + file + " not found");
- } catch (IOException ioe) {
- LOG.warn("Error reading file " + file);
- } finally {
- try {
- if (rafile != null) {
- rafile.close();
- }
- } catch (IOException ioe) {
- LOG.warn("Error closing file " + file);
- }
- }
- }
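-
- /*
- * Editor's sketch (illustrative addition, not in the original class): the
- * shifting loop above costs O(num) work for every line past the first
- * 'num'. A circular buffer yields the same "last num lines" result in
- * O(1) per line. A hypothetical helper, assuming num > 0:
- */
- private String[] lastLines(RandomAccessFile in, int num) throws IOException {
- String[] ring = new String[num]; // circular buffer of the most recent lines
- int count = 0;                   // total lines read so far
- String line;
- while ((line = in.readLine()) != null) {
- ring[count++ % num] = line;      // overwrite the oldest slot
- }
- int n = Math.min(count, num);
- String[] out = new String[n];
- for (int i = 0; i < n; i++) {    // unwind into chronological order
- out[i] = ring[(count - n + i) % num];
- }
- return out;
- }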
-
- /**
- * We no longer need anything from this task, as the job has
- * finished. If the task is still running, kill it and clean up.
- *
- * @param wasFailure true if the task failed, false if it was killed by
- * the framework
- */
- public void jobHasFinished(boolean wasFailure) throws IOException {
- // Kill the task if it is still running
- synchronized (this) {
- if (getRunState() == TaskStatus.State.RUNNING ||
- getRunState() == TaskStatus.State.UNASSIGNED ||
- getRunState() == TaskStatus.State.COMMIT_PENDING ||
- isCleaningup()) {
- kill(wasFailure);
- }
- }
-
- // Cleanup on the finished task
- cleanup(true);
- }
-
- /**
- * Something went wrong and the task must be killed.
- *
- * @param wasFailure was it a failure (versus a kill request)?
- */
- public synchronized void kill(boolean wasFailure) throws IOException {
- if (taskStatus.getRunState() == TaskStatus.State.RUNNING ||
- taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING ||
- isCleaningup()) {
- wasKilled = true;
- if (wasFailure) {
- failures += 1;
- }
- // runner could be null if task-cleanup attempt is not localized yet
- if (runner != null) {
- runner.kill();
- }
- setTaskFailState(wasFailure);
- } else if (taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) {
- if (wasFailure) {
- failures += 1;
- taskStatus.setRunState(TaskStatus.State.FAILED);
- } else {
- taskStatus.setRunState(TaskStatus.State.KILLED);
- }
- }
- taskStatus.setFinishTime(System.currentTimeMillis());
- removeFromMemoryManager(task.getTaskID());
- releaseSlot();
- myInstrumentation.statusUpdate(task, taskStatus);
- notifyTTAboutTaskCompletion();
- }
-
- private synchronized void releaseSlot() {
- if (slotTaken) {
- if (launcher != null) {
- launcher.addFreeSlots(task.getNumSlotsRequired());
- }
- slotTaken = false;
- } else {
- // wake up the launcher. it may be waiting to block slots for this task.
- if (launcher != null) {
- launcher.notifySlots();
- }
- }
- }
-
- /**
- * The map output has been lost.
- */
- private synchronized void mapOutputLost(String failure
- ) throws IOException {
- if (taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING ||
- taskStatus.getRunState() == TaskStatus.State.SUCCEEDED) {
- // change status to failure
- LOG.info("Reporting output lost:"+task.getTaskID());
- taskStatus.setRunState(TaskStatus.State.FAILED);
- taskStatus.setProgress(0.0f);
- reportDiagnosticInfo("Map output lost, rescheduling: " +
- failure);
- runningTasks.put(task.getTaskID(), this);
- mapTotal++;
- myInstrumentation.statusUpdate(task, taskStatus);
- } else {
- LOG.warn("Output already reported lost:"+task.getTaskID());
- }
- }
-
- /**
- * We no longer need anything from this task. Either the
- * controlling job is all done and the files have been copied
- * away, or the task failed and we don't need the remains.
- * Callers of cleanup() must not hold the tip lock: cleanup() does the
- * right thing itself, updating the tasks map by locking the TaskTracker
- * first and only then locking the tip.
- *
- * If needCleanup is true, the whole task directory is cleaned up;
- * otherwise only the current working directory of the task,
- * i.e. <taskid>/work, is cleaned up.
- */
- void cleanup(boolean needCleanup) {
- TaskAttemptID taskId = task.getTaskID();
- LOG.debug("Cleaning up " + taskId);
-
-
- synchronized (TaskTracker.this) {
- if (needCleanup) {
- // see if tasks data structure is holding this tip.
- // tasks could hold the tip for cleanup attempt, if cleanup attempt
- // got launched before this method.
- if (tasks.get(taskId) == this) {
- tasks.remove(taskId);
- }
- }
- synchronized (this){
- if (alwaysKeepTaskFiles ||
- (taskStatus.getRunState() == TaskStatus.State.FAILED &&
- keepFailedTaskFiles)) {
- return;
- }
- }
- }
- synchronized (this) {
- // localJobConf could be null if localization has not happened
- // then no cleanup will be required.
- if (localJobConf == null) {
- return;
- }
- try {
- removeTaskFiles(needCleanup, taskId);
- } catch (Throwable ie) {
- LOG.info("Error cleaning up task runner: "
- + StringUtils.stringifyException(ie));
- }
- }
- }
-
- /**
- * Some or all of the files from this task are no longer required. Remove
- * them via CleanupQueue.
- *
- * @param needCleanup
- * @param taskId
- * @throws IOException
- */
- void removeTaskFiles(boolean needCleanup, TaskAttemptID taskId)
- throws IOException {
- if (needCleanup) {
- if (runner != null) {
- // cleans up the output directory of the task (where map outputs
- // and reduce inputs get stored)
- runner.close();
- }
-
- if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
- // No jvm reuse, remove everything
- PathDeletionContext[] contexts =
- buildTaskControllerTaskPathDeletionContexts(localFs,
- getLocalFiles(fConf, ""), task, false/* not workDir */,
- taskController);
- directoryCleanupThread.addToQueue(contexts);
- } else {
- // Jvm reuse. We don't delete the workdir since some other task
- // (running in the same JVM) might be using the dir. The JVM
- // running the tasks would clean up the workdir per task in the
- // task process itself.
- String localTaskDir =
- getLocalTaskDir(task.getUser(), task.getJobID().toString(), taskId
- .toString(), task.isTaskCleanupTask());
- PathDeletionContext[] contexts = buildPathDeletionContexts(
- localFs, getLocalFiles(defaultJobConf, localTaskDir +
- Path.SEPARATOR + TaskTracker.JOBFILE));
- directoryCleanupThread.addToQueue(contexts);
- }
- } else {
- if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
- PathDeletionContext[] contexts =
- buildTaskControllerTaskPathDeletionContexts(localFs,
- getLocalFiles(fConf, ""), task, true /* workDir */,
- taskController);
- directoryCleanupThread.addToQueue(contexts);
- }
- }
- }
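-
- /*
- * Editor's note (illustrative addition): the decision matrix implemented
- * by removeTaskFiles() above:
- *
- *   needCleanup | JVM reuse (numTasksPerJvm != 1) | what is deleted
- *   ------------+---------------------------------+--------------------------
- *   true        | no                              | the whole task directory
- *   true        | yes                             | only job.xml; the JVM
- *               |                                 | cleans workdirs itself
- *   false       | no                              | only <taskid>/work
- *   false       | yes                             | nothing here
- */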
-
- @Override
- public boolean equals(Object obj) {
- return (obj instanceof TaskInProgress) &&
- task.getTaskID().equals
- (((TaskInProgress) obj).getTask().getTaskID());
- }
-
- @Override
- public int hashCode() {
- return task.getTaskID().hashCode();
- }
- }
-
- /**
- * Check that the current UGI corresponds to the JVM authorized to report
- * for this particular job; the check compares the current user name
- * against the job id string.
- *
- * @throws IOException for unauthorized access
- */
- private void ensureAuthorizedJVM(JobID jobId) throws IOException {
- String currentJobId =
- UserGroupInformation.getCurrentUser().getUserName();
- if (!currentJobId.equals(jobId.toString())) {
- throw new IOException ("JVM with " + currentJobId +
- " is not authorized for " + jobId);
- }
- }
-
-
- /////////////////////////////////////////////////////////////////
- // TaskUmbilicalProtocol
- /////////////////////////////////////////////////////////////////
-
- /**
- * Called upon startup by the child process, to fetch Task data.
- */
- public synchronized JvmTask getTask(JvmContext context)
- throws IOException {
- ensureAuthorizedJVM(context.jvmId.getJobId());
- JVMId jvmId = context.jvmId;
-
- // save pid of task JVM sent by child
- jvmManager.setPidToJvm(jvmId, context.pid);
-
- LOG.debug("JVM with ID : " + jvmId + " asked for a task");
- if (!jvmManager.isJvmKnown(jvmId)) {
- LOG.info("Killing unknown JVM " + jvmId);
- return new JvmTask(null, true);
- }
- RunningJob rjob = runningJobs.get(jvmId.getJobId());
- if (rjob == null) { //kill the JVM since the job is dead
- LOG.info("Killing JVM " + jvmId + " since job " + jvmId.getJobId() +
- " is dead");
- jvmManager.killJvm(jvmId);
- return new JvmTask(null, true);
- }
- TaskInProgress tip = jvmManager.getTaskForJvm(jvmId);
- if (tip == null) {
- return new JvmTask(null, false);
- }
- if (tasks.get(tip.getTask().getTaskID()) != null) { //is task still present
- LOG.info("JVM with ID: " + jvmId + " given task: " +
- tip.getTask().getTaskID());
- return new JvmTask(tip.getTask(), false);
- } else {
- LOG.info("Killing JVM with ID: " + jvmId + " since scheduled task: " +
- tip.getTask().getTaskID() + " is " + tip.taskStatus.getRunState());
- return new JvmTask(null, true);
- }
- }
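-
- /*
- * Editor's sketch (illustrative addition): the child JVM drives this
- * protocol roughly as below. Apart from the TaskUmbilicalProtocol methods
- * defined in this section, the names are hypothetical.
- *
- *   JvmTask jvmTask = umbilical.getTask(jvmContext);
- *   if (jvmTask tells the JVM to die) { shut the JVM down }
- *   Task task = jvmTask.getTask();
- *   while (the task is running) {
- *     umbilical.statusUpdate(task.getTaskID(), status); // progress heartbeat
- *   }
- *   umbilical.commitPending(...) or umbilical.done(task.getTaskID());
- */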
-
- /**
- * Called periodically to report Task progress, from 0.0 to 1.0.
- */
- public synchronized boolean statusUpdate(TaskAttemptID taskid,
- TaskStatus taskStatus)
- throws IOException {
- ensureAuthorizedJVM(taskid.getJobID());
- TaskInProgress tip = tasks.get(taskid);
- if (tip != null) {
- tip.reportProgress(taskStatus);
- myInstrumentation.statusUpdate(tip.getTask(), taskStatus);
- return true;
- } else {
- LOG.warn("Progress from unknown child task: "+taskid);
- return false;
- }
- }
-
- /**
- * Called when the task dies before completion, and we want to report back
- * diagnostic info
- */
- public synchronized void reportDiagnosticInfo(TaskAttemptID taskid, String info) throws IOException {
- ensureAuthorizedJVM(taskid.getJobID());
- internalReportDiagnosticInfo(taskid, info);
- }
-
- /**
- * Same as reportDiagnosticInfo but does not authorize the caller. This is
- * used internally within MapReduce, whereas reportDiagnosticInfo may be
- * called via RPC.
- */
- synchronized void internalReportDiagnosticInfo(TaskAttemptID taskid, String info) throws IOException {
- TaskInProgress tip = tasks.get(taskid);
- if (tip != null) {
- tip.reportDiagnosticInfo(info);
- } else {
- LOG.warn("Error from unknown child task: "+taskid+". Ignored.");
- }
- }
-
- public synchronized void reportNextRecordRange(TaskAttemptID taskid,
- SortedRanges.Range range) throws IOException {
- ensureAuthorizedJVM(taskid.getJobID());
- TaskInProgress tip = tasks.get(taskid);
- if (tip != null) {
- tip.reportNextRecordRange(range);
- } else {
- LOG.warn("reportNextRecordRange from unknown child task: "+taskid+". " +
- "Ignored.");
- }
- }
-
- /** Child checking to see if we're alive; returns whether the task is still known. */
- public synchronized boolean ping(TaskAttemptID taskid) throws IOException {
- ensureAuthorizedJVM(taskid.getJobID());
- return tasks.get(taskid) != null;
- }
-
- /**
- * The task reports that it is in COMMIT_PENDING
- * and is waiting for the commit response.
- */
- public synchronized void commitPending(TaskAttemptID taskid,
- TaskStatus taskStatus)
- throws IOException {
- ensureAuthorizedJVM(taskid.getJobID());
- LOG.info("Task " + taskid + " is in commit-pending," +"" +
- " task state:" +taskStatus.getRunState());
- statusUpdate(taskid, taskStatus);
- reportTaskFinished(taskid, true);
- }
-
- /**
- * Child checking whether it can commit
- */
- public synchronized boolean canCommit(TaskAttemptID taskid) {
- return commitResponses.contains(taskid); //don't remove it now
- }
-
- /**
- * The task is done.
- */
- public synchronized void done(TaskAttemptID taskid)
- throws IOException {
- ensureAuthorizedJVM(taskid.getJobID());
- TaskInProgress tip = tasks.get(taskid);
- commitResponses.remove(taskid);
- if (tip != null) {
- tip.reportDone();
- } else {
- LOG.warn("Unknown child task done: "+taskid+". Ignored.");
- }
- }
-
-
- /**
- * A reduce-task failed to shuffle the map-outputs. Kill the task.
- */
- public synchronized void shuffleError(TaskAttemptID taskId, String message)
- throws IOException {
- ensureAuthorizedJVM(taskId.getJobID());
- LOG.fatal("Task: " + taskId + " - Killed due to Shuffle Failure: " + message);
- TaskInProgress tip = runningTasks.get(taskId);
- tip.reportDiagnosticInfo("Shuffle Error: " + message);
- purgeTask(tip, true);
- }
-
- /**
- * A child task had a local filesystem error. Kill the task.
- */
- public synchronized void fsError(TaskAttemptID taskId, String message)
- throws IOException {
- ensureAuthorizedJVM(taskId.getJobID());
- internalFsError(taskId, message);
- }
-
- /**
- * Version of fsError() that does not do authorization checks, called by
- * the TaskRunner.
- */
- synchronized void internalFsError(TaskAttemptID taskId, String message)
- throws IOException {
- LOG.fatal("Task: " + taskId + " - Killed due to FSError: " + message);
- TaskInProgress tip = runningTasks.get(taskId);
- tip.reportDiagnosticInfo("FSError: " + message);
- purgeTask(tip, true);
- }
-
- /**
- * A child task had a fatal error. Kill the task.
- */
- public synchronized void fatalError(TaskAttemptID taskId, String msg)
- throws IOException {
- ensureAuthorizedJVM(taskId.getJobID());
- LOG.fatal("Task: " + taskId + " - exited : " + msg);
- TaskInProgress tip = runningTasks.get(taskId);
- tip.reportDiagnosticInfo("Error: " + msg);
- purgeTask(tip, true);
- }
-
- public synchronized MapTaskCompletionEventsUpdate getMapCompletionEvents(
- JobID jobId, int fromEventId, int maxLocs, TaskAttemptID id)
- throws IOException {
- TaskCompletionEvent[] mapEvents = TaskCompletionEvent.EMPTY_ARRAY;
- synchronized (shouldReset) {
- if (shouldReset.remove(id)) {
- return new MapTaskCompletionEventsUpdate(mapEvents, true);
- }
- }
- RunningJob rjob;
- synchronized (runningJobs) {
- rjob = runningJobs.get(jobId);
- if (rjob != null) {
- synchronized (rjob) {
- FetchStatus f = rjob.getFetchStatus();
- if (f != null) {
- mapEvents = f.getMapEvents(fromEventId, maxLocs);
- }
- }
- }
- }
- return new MapTaskCompletionEventsUpdate(mapEvents, false);
- }
-
- /////////////////////////////////////////////////////
- // Called by TaskTracker thread after task process ends
- /////////////////////////////////////////////////////
- /**
- * The task is no longer running. It may not have completed successfully.
- */
- void reportTaskFinished(TaskAttemptID taskid, boolean commitPending) {
- TaskInProgress tip;
- synchronized (this) {
- tip = tasks.get(taskid);
- }
- if (tip != null) {
- tip.reportTaskFinished(commitPending);
- } else {
- LOG.warn("Unknown child task finished: "+taskid+". Ignored.");
- }
- }
-
-
- /**
- * A completed map task's output has been lost.
- */
- public synchronized void mapOutputLost(TaskAttemptID taskid,
- String errorMsg) throws IOException {
- TaskInProgress tip = tasks.get(taskid);
- if (tip != null) {
- tip.mapOutputLost(errorMsg);
- } else {
- LOG.warn("Unknown child with bad map output: "+taskid+". Ignored.");
- }
- }
-
- /**
- * The data structure for initializing a job.
- */
- static class RunningJob {
- private JobID jobid;
- private JobConf jobConf;
- // keep this for later use
- volatile Set<TaskInProgress> tasks;
- boolean localized;
- boolean keepJobFiles;
- UserGroupInformation ugi;
- FetchStatus f;
- RunningJob(JobID jobid) {
- this.jobid = jobid;
- localized = false;
- tasks = new HashSet<TaskInProgress>();
- keepJobFiles = false;
- }
-
- JobID getJobID() {
- return jobid;
- }
-
- UserGroupInformation getUGI() {
- return ugi;
- }
-
- void setFetchStatus(FetchStatus f) {
- this.f = f;
- }
-
- FetchStatus getFetchStatus() {
- return f;
- }
-
- JobConf getJobConf() {
- return jobConf;
- }
- }
-
- /**
- * Get the name for this task tracker.
- * @return a string like "tracker_mymachine:50010"
- */
- String getName() {
- return taskTrackerName;
- }
-
- private synchronized List<TaskStatus> cloneAndResetRunningTaskStatuses(
- boolean sendCounters) {
- List<TaskStatus> result = new ArrayList<TaskStatus>(runningTasks.size());
- for(TaskInProgress tip: runningTasks.values()) {
- TaskStatus status = tip.getStatus();
- status.setIncludeAllCounters(sendCounters);
- // send counters for finished or failed tasks and commit pending tasks
- if (status.getRunState() != TaskStatus.State.RUNNING) {
- status.setIncludeAllCounters(true);
- }
- result.add((TaskStatus)status.clone());
- status.clearStatus();
- }
- return result;
- }
- /**
- * Get the list of tasks that will be reported back to the
- * job tracker in the next heartbeat cycle.
- * @return a copy of the list of TaskStatus objects
- */
- synchronized List<TaskStatus> getRunningTaskStatuses() {
- List<TaskStatus> result = new ArrayList<TaskStatus>(runningTasks.size());
- for(TaskInProgress tip: runningTasks.values()) {
- result.add(tip.getStatus());
- }
- return result;
- }
-
- /**
- * Get the list of stored tasks on this task tracker that are not running.
- * @return the list of TaskStatus objects for non-running tasks
- */
- synchronized List<TaskStatus> getNonRunningTasks() {
- List<TaskStatus> result = new ArrayList<TaskStatus>(tasks.size());
- for(Map.Entry<TaskAttemptID, TaskInProgress> task: tasks.entrySet()) {
- if (!runningTasks.containsKey(task.getKey())) {
- result.add(task.getValue().getStatus());
- }
- }
- return result;
- }
-
-
- /**
- * Get the list of tasks from running jobs on this task tracker.
- * @return a copy of the list of TaskStatus objects
- */
- synchronized List<TaskStatus> getTasksFromRunningJobs() {
- List<TaskStatus> result = new ArrayList<TaskStatus>(tasks.size());
- for (Map.Entry <JobID, RunningJob> item : runningJobs.entrySet()) {
- RunningJob rjob = item.getValue();
- synchronized (rjob) {
- for (TaskInProgress tip : rjob.tasks) {
- result.add(tip.getStatus());
- }
- }
- }
- return result;
- }
-
- /**
- * Get the default job conf for this tracker.
- */
- JobConf getJobConf() {
- return fConf;
- }
-
- /**
- * Check if the given local directories
- * (and parent directories, if necessary) can be created.
- * @param localDirs where the new TaskTracker should keep its local files.
- * @throws DiskErrorException if none of the local directories is writable
- */
- private static void checkLocalDirs(String[] localDirs)
- throws DiskErrorException {
- boolean writable = false;
-
- if (localDirs != null) {
- for (int i = 0; i < localDirs.length; i++) {
- try {
- DiskChecker.checkDir(new File(localDirs[i]));
- writable = true;
- } catch(DiskErrorException e) {
- LOG.warn("Task Tracker local " + e.getMessage());
- }
- }
- }
-
- if (!writable)
- throw new DiskErrorException(
- "none of the local directories is writable");
- }
-
- /**
- * Is this task tracker idle?
- * @return has this task tracker finished and cleaned up all of its tasks?
- */
- public synchronized boolean isIdle() {
- return tasks.isEmpty() && tasksToCleanup.isEmpty();
- }
-
- /**
- * Start the TaskTracker, point toward the indicated JobTracker
- */
- public static void main(String argv[]) throws Exception {
- StringUtils.startupShutdownMessage(TaskTracker.class, argv, LOG);
- if (argv.length != 0) {
- System.out.println("usage: TaskTracker");
- System.exit(-1);
- }
- try {
- JobConf conf = new JobConf();
- // enable the server to track time spent waiting on locks
- ReflectionUtils.setContentionTracing
- (conf.getBoolean(TT_CONTENTION_TRACKING, false));
- new TaskTracker(conf).run();
- } catch (Throwable e) {
- LOG.error("Can not start task tracker because "+
- StringUtils.stringifyException(e));
- System.exit(-1);
- }
- }
-
- /**
- * This class is used in TaskTracker's Jetty to serve the map outputs
- * to other nodes.
- */
- @InterfaceAudience.Private
- @InterfaceStability.Unstable
- public static class MapOutputServlet extends HttpServlet {
- private static final long serialVersionUID = 1L;
- private static final int MAX_BYTES_TO_READ = 64 * 1024;
- @Override
- public void doGet(HttpServletRequest request,
- HttpServletResponse response
- ) throws ServletException, IOException {
- long start = System.currentTimeMillis();
- String mapIds = request.getParameter("map");
- String reduceId = request.getParameter("reduce");
- String jobId = request.getParameter("job");
-
- LOG.debug("Shuffle started for maps (mapIds=" + mapIds + ") to reduce " +
- reduceId);
-
- if (jobId == null) {
- throw new IOException("job parameter is required");
- }
-
- if (mapIds == null || reduceId == null) {
- throw new IOException("map and reduce parameters are required");
- }
-
- ServletContext context = getServletContext();
- int reduce = Integer.parseInt(reduceId);
- DataOutputStream outStream = null;
-
- ShuffleServerMetrics shuffleMetrics =
- (ShuffleServerMetrics) context.getAttribute("shuffleServerMetrics");
- TaskTracker tracker =
- (TaskTracker) context.getAttribute("task.tracker");
- String exceptionStackRegex =
- (String) context.getAttribute("exceptionStackRegex");
- String exceptionMsgRegex =
- (String) context.getAttribute("exceptionMsgRegex");
-
- verifyRequest(request, response, tracker, jobId);
-
- int numMaps = 0;
- try {
- shuffleMetrics.serverHandlerBusy();
- response.setContentType("application/octet-stream");
-
- outStream = new DataOutputStream(response.getOutputStream());
- //use the same buffersize as used for reading the data from disk
- response.setBufferSize(MAX_BYTES_TO_READ);
- JobConf conf = (JobConf) context.getAttribute("conf");
- LocalDirAllocator lDirAlloc =
- (LocalDirAllocator)context.getAttribute("localDirAllocator");
- FileSystem rfs = ((LocalFileSystem)
- context.getAttribute("local.file.system")).getRaw();
-
- // Split the map ids, send output for one map at a time
- StringTokenizer itr = new StringTokenizer(mapIds, ",");
- while(itr.hasMoreTokens()) {
- String mapId = itr.nextToken();
- ++numMaps;
- sendMapFile(jobId, mapId, reduce, conf, outStream,
- tracker, lDirAlloc, shuffleMetrics, rfs);
- }
- } catch (IOException ie) {
- Log log = (Log) context.getAttribute("log");
- String errorMsg = ("getMapOutputs(" + mapIds + "," + reduceId +
- ") failed");
- log.warn(errorMsg, ie);
- checkException(ie, exceptionMsgRegex, exceptionStackRegex, shuffleMetrics);
- response.sendError(HttpServletResponse.SC_GONE, errorMsg);
- shuffleMetrics.failedOutput();
- throw ie;
- } finally {
- shuffleMetrics.serverHandlerFree();
- }
- outStream.close();
- shuffleMetrics.successOutput();
- long timeElapsed = (System.currentTimeMillis() - start);
- LOG.info("Shuffled " + numMaps
- + " maps (mapIds=" + mapIds + ") to reduce "
- + reduceId + " in " + timeElapsed + " ms");
-
- if (ClientTraceLog.isInfoEnabled()) {
- ClientTraceLog.info(String.format(MR_CLIENTTRACE_FORMAT,
- request.getLocalAddr() + ":" + request.getLocalPort(),
- request.getRemoteAddr() + ":" + request.getRemotePort(),
- numMaps, "MAPRED_SHUFFLE", reduceId,
- timeElapsed));
- }
- }
-
- protected void checkException(IOException ie, String exceptionMsgRegex,
- String exceptionStackRegex, ShuffleServerMetrics shuffleMetrics) {
- // Check whether the exception matches the configured regular
- // expressions. If both the message regex and the stack regex are set,
- // both must match; if only one is set, that one alone has to match.
- if (exceptionMsgRegex != null) {
- String msg = ie.getMessage();
- if (msg == null || !msg.matches(exceptionMsgRegex)) {
- return;
- }
- }
- if (exceptionStackRegex != null
- && !checkStackException(ie, exceptionStackRegex)) {
- return;
- }
- shuffleMetrics.exceptionsCaught();
- }
-
- private boolean checkStackException(IOException ie,
- String exceptionStackRegex) {
- StackTraceElement[] stack = ie.getStackTrace();
-
- for (StackTraceElement elem : stack) {
- String stacktrace = elem.toString();
- if (stacktrace.matches(exceptionStackRegex)) {
- return true;
- }
- }
- return false;
- }
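-
- /*
- * Editor's sketch (illustrative addition): the matching semantics of
- * checkException() above, restated as a self-contained predicate. If both
- * regexes are configured, both must match before the exception counts; a
- * single configured regex suffices on its own. The method name and
- * parameters here are hypothetical.
- */
- private static boolean wouldCountException(String msg, String msgRegex,
- boolean stackMatched, String stackRegex) {
- if (msgRegex != null && (msg == null || !msg.matches(msgRegex))) {
- return false; // message regex configured but did not match
- }
- if (stackRegex != null && !stackMatched) {
- return false; // stack regex configured but did not match
- }
- return true;   // would be counted via shuffleMetrics.exceptionsCaught()
- }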
-
- private void sendMapFile(String jobId, String mapId,
- int reduce,
- Configuration conf,
- DataOutputStream outStream,
- TaskTracker tracker,
- LocalDirAllocator lDirAlloc,
- ShuffleServerMetrics shuffleMetrics,
- FileSystem localfs
- ) throws IOException {
-
- LOG.debug("sendMapFile called for " + mapId + " to reduce " + reduce);
-
- // true iff IOException was caused by attempt to access input
- boolean isInputException = false;
- FileInputStream mapOutputIn = null;
- byte[] buffer = new byte[MAX_BYTES_TO_READ];
- long totalRead = 0;
-
- String userName = null;
- String runAsUserName = null;
- synchronized (tracker.runningJobs) {
- RunningJob rjob = tracker.runningJobs.get(JobID.forName(jobId));
- if (rjob == null) {
- throw new IOException("Unknown job " + jobId + "!!");
- }
- userName = rjob.jobConf.getUser();
- runAsUserName = tracker.getTaskController().getRunAsUser(rjob.jobConf);
- }
- // Index file
- Path indexFileName =
- lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir(
- userName, jobId, mapId)
- + "/file.out.index", conf);
-
- // Map-output file
- Path mapOutputFileName =
- lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir(
- userName, jobId, mapId)
- + "/file.out", conf);
-
- /**
- * Read the index file to get the information about where the map-output
- * for the given reducer is available.
- */
- IndexRecord info =
- tracker.indexCache.getIndexInformation(mapId, reduce, indexFileName,
- runAsUserName);
-
- try {
- /**
- * Read the data from the single map-output file and
- * send it to the reducer.
- */
- //open the map-output file
- mapOutputIn = SecureIOUtils.openForRead(
- new File(mapOutputFileName.toUri().getPath()), runAsUserName, null);
- //seek to the correct offset for the reduce
- IOUtils.skipFully(mapOutputIn, info.startOffset);
-
- // write header for each map output
- ShuffleHeader header = new ShuffleHeader(mapId, info.partLength,
- info.rawLength, reduce);
- header.write(outStream);
-
- // read the map-output and stream it out
- isInputException = true;
- long rem = info.partLength;
- if (rem == 0) {
- throw new IOException("Illegal partLength of 0 for mapId " + mapId +
- " to reduce " + reduce);
- }
- int len =
- mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ));
- long now = 0;
- while (len >= 0) {
- rem -= len;
- try {
- shuffleMetrics.outputBytes(len);
-
- if (len > 0) {
- outStream.write(buffer, 0, len);
- } else {
- LOG.info("Skipped zero-length read of map " + mapId +
- " to reduce " + reduce);
- }
-
- } catch (IOException ie) {
- isInputException = false;
- throw ie;
- }
- totalRead += len;
- if (rem == 0) {
- break;
- }
- len =
- mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ));
- }
- try {
- outStream.flush();
- } catch (IOException ie) {
- isInputException = false;
- throw ie;
- }
- } catch (IOException ie) {
- String errorMsg = "error on sending map " + mapId + " to reduce " +
- reduce;
- if (isInputException) {
- tracker.mapOutputLost(TaskAttemptID.forName(mapId), errorMsg + ": " +
- StringUtils.stringifyException(ie));
- }
- throw new IOException(errorMsg, ie);
- } finally {
- if (mapOutputIn != null) {
- try {
- mapOutputIn.close();
- } catch (IOException ioe) {
- LOG.info("problem closing map output file", ioe);
- }
- }
- }
-
- LOG.info("Sent out " + totalRead + " bytes to reduce " + reduce +
- " from map: " + mapId + " given " + info.partLength + "/" +
- info.rawLength);
- }
-
- /**
- * Verify that the request carries the correct hash for the URL, and add
- * a field to the reply header with the hash of that hash.
- * @param request the HTTP request to verify
- * @param response the HTTP response carrying the reply hash
- * @param tracker the TaskTracker holding the job-token secrets
- * @param jobId the job whose token secret is used for verification
- * @throws IOException
- */
- private void verifyRequest(HttpServletRequest request,
- HttpServletResponse response, TaskTracker tracker, String jobId)
- throws IOException {
- SecretKey tokenSecret = tracker.getJobTokenSecretManager()
- .retrieveTokenSecret(jobId);
- // string to encrypt
- String enc_str = SecureShuffleUtils.buildMsgFrom(request);
-
- // hash from the fetcher
- String urlHashStr = request.getHeader(SecureShuffleUtils.HTTP_HEADER_URL_HASH);
- if (urlHashStr == null) {
- response.sendError(HttpServletResponse.SC_UNAUTHORIZED);
- throw new IOException("fetcher cannot be authenticated");
- }
- int len = urlHashStr.length();
- LOG.debug("verifying request. enc_str=" + enc_str + "; hash=..." +
- urlHashStr.substring(len - len / 2, len - 1)); // half of the hash, for debugging
-
- // verify - throws exception
- try {
- SecureShuffleUtils.verifyReply(urlHashStr, enc_str, tokenSecret);
- } catch (IOException ioe) {
- response.sendError(HttpServletResponse.SC_UNAUTHORIZED);
- throw ioe;
- }
-
- // verification passed - encode the reply
- String reply = SecureShuffleUtils.generateHash(urlHashStr.getBytes(), tokenSecret);
- response.addHeader(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, reply);
-
- len = reply.length();
- LOG.debug("Fetcher request verfied. enc_str="+enc_str+";reply="
- +reply.substring(len-len/2, len-1));
- }
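-
- /*
- * Editor's sketch (illustrative addition): the fetcher-side half of the
- * handshake verified above, assuming the fetcher shares the job-token
- * secret. buildMsgFrom(), generateHash() and verifyReply() appear in the
- * servlet code above; the URL-based overload and the connection plumbing
- * are assumptions.
- *
- *   String encStr = SecureShuffleUtils.buildMsgFrom(requestUrl);
- *   String urlHash =
- *       SecureShuffleUtils.generateHash(encStr.getBytes(), tokenSecret);
- *   connection.addRequestProperty(
- *       SecureShuffleUtils.HTTP_HEADER_URL_HASH, urlHash);
- *   // ... issue the request, read the response ...
- *   String replyHash = connection.getHeaderField(
- *       SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH);
- *   // throws IOException unless the reply is a valid hash of urlHash
- *   SecureShuffleUtils.verifyReply(replyHash, urlHash, tokenSecret);
- */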
- }
-
- // Get the full paths of the given subdirectory on all the local disks.
- Path[] getLocalFiles(JobConf conf, String subdir) throws IOException{
- String[] localDirs = conf.getLocalDirs();
- Path[] paths = new Path[localDirs.length];
- FileSystem localFs = FileSystem.getLocal(conf);
- boolean subdirNeeded = (subdir != null) && (subdir.length() > 0);
- for (int i = 0; i < localDirs.length; i++) {
- paths[i] = (subdirNeeded) ? new Path(localDirs[i], subdir)
- : new Path(localDirs[i]);
- paths[i] = paths[i].makeQualified(localFs);
- }
- return paths;
- }
-
- FileSystem getLocalFileSystem(){
- return localFs;
- }
-
- // only used by tests
- void setLocalFileSystem(FileSystem fs){
- localFs = fs;
- }
-
- int getMaxCurrentMapTasks() {
- return maxMapSlots;
- }
-
- int getMaxCurrentReduceTasks() {
- return maxReduceSlots;
- }
-
- //called from unit test
- synchronized void setMaxMapSlots(int mapSlots) {
- maxMapSlots = mapSlots;
- }
-
- //called from unit test
- synchronized void setMaxReduceSlots(int reduceSlots) {
- maxReduceSlots = reduceSlots;
- }
-
- /**
- * Is the TaskMemoryManager enabled on this system?
- * @return true if enabled, false otherwise.
- */
- public boolean isTaskMemoryManagerEnabled() {
- return taskMemoryManagerEnabled;
- }
-
- public TaskMemoryManagerThread getTaskMemoryManager() {
- return taskMemoryManager;
- }
-
- /**
- * Normalize a memory configuration value: negative values are mapped to
- * JobConf.DISABLED_MEMORY_LIMIT.
- *
- * @param val the raw configured value
- * @return the normalized value
- */
- private long normalizeMemoryConfigValue(long val) {
- if (val < 0) {
- val = JobConf.DISABLED_MEMORY_LIMIT;
- }
- return val;
- }
-
- /**
- * Memory-related setup
- */
- private void initializeMemoryManagement() {
-
- //handling @deprecated
- if (fConf.get(MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY) != null) {
- LOG.warn(
- JobConf.deprecatedString(
- MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY));
- }
-
- //handling @deprecated
- if (fConf.get(MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY) != null) {
- LOG.warn(
- JobConf.deprecatedString(
- MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY));
- }
-
- //handling @deprecated
- if (fConf.get(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY) != null) {
- LOG.warn(
- JobConf.deprecatedString(
- JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY));
- }
-
- //handling @deprecated
- if (fConf.get(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY) != null) {
- LOG.warn(
- JobConf.deprecatedString(
- JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY));
- }
-
- // Use TT_MEMORY_CALCULATOR_PLUGIN if it is configured.
- Class<? extends MemoryCalculatorPlugin> clazz =
- fConf.getClass(TT_MEMORY_CALCULATOR_PLUGIN,
- null, MemoryCalculatorPlugin.class);
- MemoryCalculatorPlugin memoryCalculatorPlugin = (clazz == null ?
- null : MemoryCalculatorPlugin.getMemoryCalculatorPlugin(clazz, fConf));
- if (memoryCalculatorPlugin != null || resourceCalculatorPlugin != null) {
- totalVirtualMemoryOnTT = (memoryCalculatorPlugin == null ?
- resourceCalculatorPlugin.getVirtualMemorySize() :
- memoryCalculatorPlugin.getVirtualMemorySize());
- if (totalVirtualMemoryOnTT <= 0) {
- LOG.warn("TaskTracker's totalVmem could not be calculated. "
- + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT);
- totalVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
- }
- totalPhysicalMemoryOnTT = (memoryCalculatorPlugin == null ?
- resourceCalculatorPlugin.getPhysicalMemorySize() :
- memoryCalculatorPlugin.getPhysicalMemorySize());
- if (totalPhysicalMemoryOnTT <= 0) {
- LOG.warn("TaskTracker's totalPmem could not be calculated. "
- + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT);
- totalPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT;
- }
- }
-
- mapSlotMemorySizeOnTT =
- fConf.getLong(
- MAPMEMORY_MB,
- JobConf.DISABLED_MEMORY_LIMIT);
- reduceSlotSizeMemoryOnTT =
- fConf.getLong(
- REDUCEMEMORY_MB,
- JobConf.DISABLED_MEMORY_LIMIT);
- totalMemoryAllottedForTasks =
- maxMapSlots * mapSlotMemorySizeOnTT + maxReduceSlots
- * reduceSlotSizeMemoryOnTT;
- if (totalMemoryAllottedForTasks < 0) {
- //check the old keys, which might still be used by the administrator
- //when configuring memory monitoring on the TT
- long memoryAllotedForSlot = fConf.normalizeMemoryConfigValue(
- fConf.getLong(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY,
- JobConf.DISABLED_MEMORY_LIMIT));
- long limitVmPerTask = fConf.normalizeMemoryConfigValue(
- fConf.getLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
- JobConf.DISABLED_MEMORY_LIMIT));
- if (memoryAllotedForSlot == JobConf.DISABLED_MEMORY_LIMIT) {
- totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT;
- } else {
- if (memoryAllotedForSlot > limitVmPerTask) {
- LOG.info("DefaultMaxVmPerTask is mis-configured: it should not be " +
- "greater than the per-task limit");
- totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT;
- } else {
- totalMemoryAllottedForTasks = (maxMapSlots +
- maxReduceSlots) * (memoryAllotedForSlot / (1024 * 1024));
- }
- }
- }
- if (totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT) {
- LOG.info("totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT."
- + " Thrashing might happen.");
- } else if (totalMemoryAllottedForTasks > totalVirtualMemoryOnTT) {
- LOG.info("totalMemoryAllottedForTasks > totalVirtualMemoryOnTT."
- + " Thrashing might happen.");
- }
-
- reservedPhysicalMemoryOnTT =
- fConf.getLong(TTConfig.TT_RESERVED_PHYSCIALMEMORY_MB,
- JobConf.DISABLED_MEMORY_LIMIT);
- reservedPhysicalMemoryOnTT =
- reservedPhysicalMemoryOnTT == JobConf.DISABLED_MEMORY_LIMIT ?
- JobConf.DISABLED_MEMORY_LIMIT :
- reservedPhysicalMemoryOnTT * 1024 * 1024; // normalize to bytes
-
- // start the taskMemoryManager thread only if enabled
- setTaskMemoryManagerEnabledFlag();
- if (isTaskMemoryManagerEnabled()) {
- taskMemoryManager = new TaskMemoryManagerThread(this);
- taskMemoryManager.setDaemon(true);
- taskMemoryManager.start();
- }
- }
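-
- /*
- * Editor's note (illustrative addition): a worked example of the slot
- * arithmetic above, with hypothetical values. With maxMapSlots = 4,
- * maxReduceSlots = 2, MAPMEMORY_MB = 1024 and REDUCEMEMORY_MB = 2048:
- *
- *   totalMemoryAllottedForTasks = 4 * 1024 + 2 * 2048 = 8192 (MB)
- *
- * If both per-slot keys are left at DISABLED_MEMORY_LIMIT (-1), the sum is
- * negative and the old-key fallback above is consulted instead.
- */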
-
- void setTaskMemoryManagerEnabledFlag() {
- if (!ProcfsBasedProcessTree.isAvailable()) {
- LOG.info("ProcessTree implementation is missing on this system. "
- + "TaskMemoryManager is disabled.");
- taskMemoryManagerEnabled = false;
- return;
- }
-
- if (reservedPhysicalMemoryOnTT == JobConf.DISABLED_MEMORY_LIMIT
- && totalMemoryAllottedForTasks == JobConf.DISABLED_MEMORY_LIMIT) {
- taskMemoryManagerEnabled = false;
- LOG.warn("TaskTracker's totalMemoryAllottedForTasks is -1 and " +
- "reserved physical memory is not configured. " +
- "TaskMemoryManager is disabled.");
- return;
- }
-
- taskMemoryManagerEnabled = true;
- }
-
- /**
- * Clean up a task at the request of the TaskMemoryManagerThread.
- * @param tid the task attempt to clean up
- * @param wasFailure mark the task as 'failed' if true, 'killed' otherwise
- * @param diagnosticMsg the diagnostic message to report for the task
- */
- synchronized void cleanUpOverMemoryTask(TaskAttemptID tid, boolean wasFailure,
- String diagnosticMsg) {
- TaskInProgress tip = runningTasks.get(tid);
- if (tip != null) {
- tip.reportDiagnosticInfo(diagnosticMsg);
- try {
- purgeTask(tip, wasFailure); // Marking it as failed/killed.
- } catch (IOException ioe) {
- LOG.warn("Couldn't purge the task of " + tid + ". Error : " + ioe);
- }
- }
- }
-
- /**
- * Wrapper method used by TaskTracker to check if {@link NodeHealthCheckerService}
- * can be started
- * @param conf configuration used to check if service can be started
- * @return true if service can be started
- */
- private boolean shouldStartHealthMonitor(Configuration conf) {
- return NodeHealthCheckerService.shouldRun(conf);
- }
-
- /**
- * Wrapper method used to start {@link NodeHealthCheckerService} for
- * Task Tracker
- * @param conf Configuration used by the service.
- */
- private void startHealthMonitor(Configuration conf) {
- healthChecker = new NodeHealthCheckerService(conf);
- healthChecker.start();
- }
-
- TrackerDistributedCacheManager getTrackerDistributedCacheManager() {
- return distributedCacheManager;
- }
-
- /**
- * Download the job-token file from the FS and save it on the local fs.
- * @param user the job owner
- * @param jobId the job whose token file is localized
- * @return the local file system path of the downloaded file.
- * @throws IOException
- */
- private String localizeJobTokenFile(String user, JobID jobId)
- throws IOException {
- // check if the job-token file is there
- Path skPath = new Path(systemDirectory,
- jobId.toString()+"/"+TokenCache.JOB_TOKEN_HDFS_FILE);
-
- FileStatus status = null;
- long jobTokenSize = -1;
- status = systemFS.getFileStatus(skPath); //throws FileNotFoundException
- jobTokenSize = status.getLen();
-
- Path localJobTokenFile =
- lDirAlloc.getLocalPathForWrite(getLocalJobTokenFile(user,
- jobId.toString()), jobTokenSize, fConf);
- String localJobTokenFileStr = localJobTokenFile.toUri().getPath();
- LOG.debug("localizingJobTokenFile from sd="+skPath.toUri().getPath() +
- " to " + localJobTokenFileStr);
-
- // Download job_token
- systemFS.copyToLocalFile(skPath, localJobTokenFile);
- return localJobTokenFileStr;
- }
-
- JobACLsManager getJobACLsManager() {
- return aclsManager.getJobACLsManager();
- }
-
- ACLsManager getACLsManager() {
- return aclsManager;
- }
-
- synchronized TaskInProgress getRunningTask(TaskAttemptID tid) {
- return runningTasks.get(tid);
- }
-}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/Cluster.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/Cluster.java
deleted file mode 100644
index 23b3505..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/Cluster.java
+++ /dev/null
@@ -1,407 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.ServiceLoader;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.jobhistory.JobHistory;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
-import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.mapreduce.server.jobtracker.State;
-import org.apache.hadoop.mapreduce.util.ConfigUtil;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-
-/**
- * Provides a way to access information about the map/reduce cluster.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class Cluster {
-
- @InterfaceStability.Evolving
- public static enum JobTrackerStatus {INITIALIZING, RUNNING};
-
- private ClientProtocolProvider clientProtocolProvider;
- private ClientProtocol client;
- private UserGroupInformation ugi;
- private Configuration conf;
- private FileSystem fs = null;
- private Path sysDir = null;
- private Path stagingAreaDir = null;
- private Path jobHistoryDir = null;
-
- static {
- ConfigUtil.loadResources();
- }
-
- public Cluster(Configuration conf) throws IOException {
- this.conf = conf;
- this.ugi = UserGroupInformation.getCurrentUser();
- for (ClientProtocolProvider provider : ServiceLoader.load(ClientProtocolProvider.class)) {
- ClientProtocol clientProtocol = provider.create(conf);
- if (clientProtocol != null) {
- clientProtocolProvider = provider;
- client = clientProtocol;
- break;
- }
- }
- }
-
- public Cluster(InetSocketAddress jobTrackAddr, Configuration conf)
- throws IOException {
- this.conf = conf;
- this.ugi = UserGroupInformation.getCurrentUser();
- for (ClientProtocolProvider provider : ServiceLoader.load(ClientProtocolProvider.class)) {
- ClientProtocol clientProtocol = provider.create(jobTrackAddr, conf);
- if (clientProtocol != null) {
- clientProtocolProvider = provider;
- client = clientProtocol;
- break;
- }
- }
- }
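-
- /*
- * Editor's sketch (illustrative addition): minimal client usage of this
- * class, assuming a ClientProtocolProvider on the classpath can serve the
- * given configuration. exampleUsage itself is hypothetical, not part of
- * the API.
- */
- static void exampleUsage(Configuration conf) throws Exception {
- Cluster cluster = new Cluster(conf);
- try {
- // cluster-wide view
- System.out.println("JobTracker: " + cluster.getJobTrackerStatus());
- // per-job view
- for (JobStatus status : cluster.getAllJobStatuses()) {
- System.out.println(status.getJobID());
- }
- } finally {
- cluster.close();
- }
- }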
-
- ClientProtocol getClient() {
- return client;
- }
-
- Configuration getConf() {
- return conf;
- }
-
- /**
- * Close the <code>Cluster</code>.
- */
- public synchronized void close() throws IOException {
- clientProtocolProvider.close(client);
- }
-
- private Job[] getJobs(JobStatus[] stats) throws IOException {
- List<Job> jobs = new ArrayList<Job>();
- for (JobStatus stat : stats) {
- jobs.add(new Job(this, stat, new JobConf(stat.getJobFile())));
- }
- return jobs.toArray(new Job[0]);
- }
-
- /**
- * Get the file system where job-specific files are stored.
- *
- * @return the FileSystem where job-specific files are stored
- * @throws IOException
- * @throws InterruptedException
- */
- public synchronized FileSystem getFileSystem()
- throws IOException, InterruptedException {
- if (this.fs == null) {
- try {
- this.fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
- public FileSystem run() throws IOException, InterruptedException {
- final Path sysDir = new Path(client.getSystemDir());
- return sysDir.getFileSystem(getConf());
- }
- });
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
- }
- return fs;
- }
-
- /**
- * Get the job corresponding to the given job id.
- *
- * @param jobId the id of the job
- * @return the {@link Job} object, or null if no such job exists
- * @throws IOException
- * @throws InterruptedException
- */
- public Job getJob(JobID jobId) throws IOException, InterruptedException {
- JobStatus status = client.getJobStatus(jobId);
- if (status != null) {
- return new Job(this, status, new JobConf(status.getJobFile()));
- }
- return null;
- }
-
- /**
- * Get all the queues in the cluster.
- *
- * @return array of {@link QueueInfo}
- * @throws IOException
- * @throws InterruptedException
- */
- public QueueInfo[] getQueues() throws IOException, InterruptedException {
- return client.getQueues();
- }
-
- /**
- * Get queue information for the specified name.
- *
- * @param name the queue name
- * @return object of {@link QueueInfo}
- * @throws IOException
- * @throws InterruptedException
- */
- public QueueInfo getQueue(String name)
- throws IOException, InterruptedException {
- return client.getQueue(name);
- }
-
- /**
- * Get current cluster status.
- *
- * @return object of {@link ClusterMetrics}
- * @throws IOException
- * @throws InterruptedException
- */
- public ClusterMetrics getClusterStatus() throws IOException, InterruptedException {
- return client.getClusterMetrics();
- }
-
- /**
- * Get all active trackers in the cluster.
- *
- * @return array of {@link TaskTrackerInfo}
- * @throws IOException
- * @throws InterruptedException
- */
- public TaskTrackerInfo[] getActiveTaskTrackers()
- throws IOException, InterruptedException {
- return client.getActiveTrackers();
- }
-
- /**
- * Get blacklisted trackers.
- *
- * @return array of {@link TaskTrackerInfo}
- * @throws IOException
- * @throws InterruptedException
- */
- public TaskTrackerInfo[] getBlackListedTaskTrackers()
- throws IOException, InterruptedException {
- return client.getBlacklistedTrackers();
- }
-
- /**
- * Get all the jobs in the cluster.
- *
- * @return array of {@link Job}
- * @throws IOException
- * @throws InterruptedException
- * @deprecated Use {@link #getAllJobStatuses()} instead.
- */
- @Deprecated
- public Job[] getAllJobs() throws IOException, InterruptedException {
- return getJobs(client.getAllJobs());
- }
-
- /**
- * Get job status for all jobs in the cluster.
- * @return job status for all jobs in cluster
- * @throws IOException
- * @throws InterruptedException
- */
- public JobStatus[] getAllJobStatuses() throws IOException, InterruptedException {
- return client.getAllJobs();
- }
-
- /**
- * Grab the jobtracker system directory path where
- * job-specific files will be placed.
- *
- * @return the system directory where job-specific files are to be placed.
- */
- public Path getSystemDir() throws IOException, InterruptedException {
- if (sysDir == null) {
- sysDir = new Path(client.getSystemDir());
- }
- return sysDir;
- }
-
- /**
- * Grab the jobtracker's view of the staging directory path where
- * job-specific files will be placed.
- *
- * @return the staging directory where job-specific files are to be placed.
- */
- public Path getStagingAreaDir() throws IOException, InterruptedException {
- if (stagingAreaDir == null) {
- stagingAreaDir = new Path(client.getStagingAreaDir());
- }
- return stagingAreaDir;
- }
-
- /**
- * Get the job history file path for a given job id. The job history file
- * at this path may or may not exist, depending on the job's completion
- * state; it is present only for completed jobs.
- * @param jobId the JobID of the job submitted by the current user.
- * @return the file path of the job history file
- * @throws IOException
- * @throws InterruptedException
- */
- public String getJobHistoryUrl(JobID jobId) throws IOException,
- InterruptedException {
- if (jobHistoryDir == null) {
- jobHistoryDir = new Path(client.getJobHistoryDir());
- }
- return JobHistory.getJobHistoryFile(jobHistoryDir, jobId,
- ugi.getShortUserName()).toString();
- }
-
- /**
- * Gets the queue ACLs for the current user.
- * @return array of QueueAclsInfo objects for the current user.
- * @throws IOException
- */
- public QueueAclsInfo[] getQueueAclsForCurrentUser()
- throws IOException, InterruptedException {
- return client.getQueueAclsForCurrentUser();
- }
-
- /**
- * Gets the root level queues.
- * @return array of {@link QueueInfo} objects.
- * @throws IOException
- */
- public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
- return client.getRootQueues();
- }
-
- /**
- * Returns immediate children of queueName.
- * @param queueName
- * @return array of {@link QueueInfo} which are children of queueName
- * @throws IOException
- */
- public QueueInfo[] getChildQueues(String queueName)
- throws IOException, InterruptedException {
- return client.getChildQueues(queueName);
- }
-
- /**
- * Get the JobTracker's state.
- *
- * @return {@link State} of the JobTracker
- * @throws IOException
- * @throws InterruptedException
- * @deprecated Use {@link #getJobTrackerStatus()} instead.
- */
- @Deprecated
- public State getJobTrackerState() throws IOException, InterruptedException {
- return client.getJobTrackerState();
- }
-
- /**
- * Get the JobTracker's status.
- *
- * @return {@link JobTrackerStatus} of the JobTracker
- * @throws IOException
- * @throws InterruptedException
- */
- public JobTrackerStatus getJobTrackerStatus() throws IOException,
- InterruptedException {
- return client.getJobTrackerStatus();
- }
-
- /**
- * Get the tasktracker expiry interval for the cluster.
- * @return the expiry interval in milliseconds
- */
- public long getTaskTrackerExpiryInterval() throws IOException,
- InterruptedException {
- return client.getTaskTrackerExpiryInterval();
- }
-
- /**
- * Get a delegation token for the user from the JobTracker.
- * @param renewer the user who can renew the token
- * @return the new token
- * @throws IOException
- */
- public Token<DelegationTokenIdentifier>
- getDelegationToken(Text renewer) throws IOException, InterruptedException{
- Token<DelegationTokenIdentifier> result =
- client.getDelegationToken(renewer);
- InetSocketAddress addr = NetUtils.createSocketAddr(
- conf.get(JTConfig.JT_IPC_ADDRESS, "localhost:8012"));
- StringBuilder service = new StringBuilder();
- service.append(NetUtils.normalizeHostName(addr.getAddress().
- getHostAddress()));
- service.append(':');
- service.append(addr.getPort());
- result.setService(new Text(service.toString()));
- return result;
- }
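-
- /*
- * Editor's note (illustrative addition): with a hypothetical
- * JT_IPC_ADDRESS of "jthost:8012" resolving to 192.0.2.7, the service
- * field set above becomes the Text "192.0.2.7:8012", keying the token by
- * the resolved JobTracker address.
- */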
-
- /**
- * Renew a delegation token
- * @param token the token to renew
- * @return the new expiration time
- * @throws InvalidToken
- * @throws IOException
- */
- public long renewDelegationToken(Token<DelegationTokenIdentifier> token
- ) throws InvalidToken, IOException,
- InterruptedException {
- try {
- return client.renewDelegationToken(token);
- } catch (RemoteException re) {
- throw re.unwrapRemoteException(InvalidToken.class,
- AccessControlException.class);
- }
- }
-
- /**
- * Cancel a delegation token from the JobTracker
- * @param token the token to cancel
- * @throws IOException
- */
- public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
- ) throws IOException,
- InterruptedException {
- try {
- client.cancelDelegationToken(token);
- } catch (RemoteException re) {
- throw re.unwrapRemoteException(InvalidToken.class,
- AccessControlException.class);
- }
- }
-
-}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobCounter.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/JobCounter.java
deleted file mode 100644
index 819398f..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/JobCounter.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-// Per-job counters
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public enum JobCounter {
- NUM_FAILED_MAPS,
- NUM_FAILED_REDUCES,
- TOTAL_LAUNCHED_MAPS,
- TOTAL_LAUNCHED_REDUCES,
- OTHER_LOCAL_MAPS,
- DATA_LOCAL_MAPS,
- RACK_LOCAL_MAPS,
- SLOTS_MILLIS_MAPS,
- SLOTS_MILLIS_REDUCES,
- FALLOW_SLOTS_MILLIS_MAPS,
- FALLOW_SLOTS_MILLIS_REDUCES
-}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/MRConfig.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/MRConfig.java
deleted file mode 100644
index 6f4a162..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/MRConfig.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapreduce;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.mapred.JobTracker;
-import org.apache.hadoop.mapred.TaskTracker;
-
-/**
- * Placeholder for cluster-level configuration keys.
- *
- * These keys are used by both {@link JobTracker} and {@link TaskTracker}. The
- * keys should have "mapreduce.cluster." as the prefix.
- *
- */
-@InterfaceAudience.Private
-public interface MRConfig {
-
- // Cluster-level configuration parameters
- public static final String TEMP_DIR = "mapreduce.cluster.temp.dir";
- public static final String LOCAL_DIR = "mapreduce.cluster.local.dir";
- public static final String MAPMEMORY_MB = "mapreduce.cluster.mapmemory.mb";
- public static final String REDUCEMEMORY_MB =
- "mapreduce.cluster.reducememory.mb";
- public static final String MR_ACLS_ENABLED = "mapreduce.cluster.acls.enabled";
- public static final String MR_ADMINS =
- "mapreduce.cluster.administrators";
- @Deprecated
- public static final String MR_SUPERGROUP =
- "mapreduce.cluster.permissions.supergroup";
-
- //Delegation token related keys
- public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY =
- "mapreduce.cluster.delegation.key.update-interval";
- public static final long DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT =
- 24*60*60*1000; // 1 day
- public static final String DELEGATION_TOKEN_RENEW_INTERVAL_KEY =
- "mapreduce.cluster.delegation.token.renew-interval";
- public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT =
- 24*60*60*1000; // 1 day
- public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY =
- "mapreduce.cluster.delegation.token.max-lifetime";
- public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT =
- 7*24*60*60*1000; // 7 days
-
- public static final String FRAMEWORK_NAME = "mapreduce.framework.name";
- public static final String TASK_LOCAL_OUTPUT_CLASS =
- "mapreduce.task.local.output.class";
-}
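-
-// Editor's sketch (illustrative addition): reading these cluster-level keys
-// from a Configuration, with hypothetical defaults; the key names come from
-// the interface above.
-//
-//   Configuration conf = new Configuration();
-//   long mapSlotMb = conf.getLong(MRConfig.MAPMEMORY_MB, -1);
-//   long reduceSlotMb = conf.getLong(MRConfig.REDUCEMEMORY_MB, -1);
-//   boolean aclsOn = conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);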
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/MRJobConfig.java
deleted file mode 100644
index c3c8349..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapreduce;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface MRJobConfig {
-
- // Put all of the attribute names in here so that Job and JobContext are
- // consistent.
- public static final String INPUT_FORMAT_CLASS_ATTR = "mapreduce.job.inputformat.class";
-
- public static final String MAP_CLASS_ATTR = "mapreduce.job.map.class";
-
- public static final String COMBINE_CLASS_ATTR = "mapreduce.job.combine.class";
-
- public static final String REDUCE_CLASS_ATTR = "mapreduce.job.reduce.class";
-
- public static final String OUTPUT_FORMAT_CLASS_ATTR = "mapreduce.job.outputformat.class";
-
- public static final String PARTITIONER_CLASS_ATTR = "mapreduce.job.partitioner.class";
-
- public static final String SETUP_CLEANUP_NEEDED = "mapreduce.job.committer.setup.cleanup.needed";
-
- public static final String TASK_CLEANUP_NEEDED = "mapreduce.job.committer.task.cleanup.needed";
-
- public static final String JAR = "mapreduce.job.jar";
-
- public static final String ID = "mapreduce.job.id";
-
- public static final String JOB_NAME = "mapreduce.job.name";
-
- public static final String JAR_UNPACK_PATTERN = "mapreduce.job.jar.unpack.pattern";
-
- public static final String USER_NAME = "mapreduce.job.user.name";
-
- public static final String PRIORITY = "mapreduce.job.priority";
-
- public static final String QUEUE_NAME = "mapreduce.job.queuename";
-
- public static final String JVM_NUMTASKS_TORUN = "mapreduce.job.jvm.numtasks";
-
- public static final String SPLIT_FILE = "mapreduce.job.splitfile";
-
- public static final String NUM_MAPS = "mapreduce.job.maps";
-
- public static final String MAX_TASK_FAILURES_PER_TRACKER = "mapreduce.job.maxtaskfailures.per.tracker";
-
- public static final String COMPLETED_MAPS_FOR_REDUCE_SLOWSTART = "mapreduce.job.reduce.slowstart.completedmaps";
-
- public static final String NUM_REDUCES = "mapreduce.job.reduces";
-
- public static final String SKIP_RECORDS = "mapreduce.job.skiprecords";
-
- public static final String SKIP_OUTDIR = "mapreduce.job.skip.outdir";
-
- public static final String SPECULATIVE_SLOWNODE_THRESHOLD = "mapreduce.job.speculative.slownodethreshold";
-
- public static final String SPECULATIVE_SLOWTASK_THRESHOLD = "mapreduce.job.speculative.slowtaskthreshold";
-
- public static final String SPECULATIVECAP = "mapreduce.job.speculative.speculativecap";
-
- public static final String JOB_LOCAL_DIR = "mapreduce.job.local.dir";
-
- public static final String OUTPUT_KEY_CLASS = "mapreduce.job.output.key.class";
-
- public static final String OUTPUT_VALUE_CLASS = "mapreduce.job.output.value.class";
-
- public static final String KEY_COMPARATOR = "mapreduce.job.output.key.comparator.class";
-
- public static final String GROUP_COMPARATOR_CLASS = "mapreduce.job.output.group.comparator.class";
-
- public static final String WORKING_DIR = "mapreduce.job.working.dir";
-
- public static final String END_NOTIFICATION_URL = "mapreduce.job.end-notification.url";
-
- public static final String END_NOTIFICATION_RETRIES = "mapreduce.job.end-notification.retry.attempts";
-
- public static final String END_NOTIFICATION_RETRIE_INTERVAL = "mapreduce.job.end-notification.retry.interval";
-
- public static final String CLASSPATH_ARCHIVES = "mapreduce.job.classpath.archives";
-
- public static final String CLASSPATH_FILES = "mapreduce.job.classpath.files";
-
- public static final String CACHE_FILES = "mapreduce.job.cache.files";
-
- public static final String CACHE_ARCHIVES = "mapreduce.job.cache.archives";
-
- public static final String CACHE_FILES_SIZES = "mapreduce.job.cache.files.filesizes"; // internal use only
-
- public static final String CACHE_ARCHIVES_SIZES = "mapreduce.job.cache.archives.filesizes"; // ditto
-
- public static final String CACHE_LOCALFILES = "mapreduce.job.cache.local.files";
-
- public static final String CACHE_LOCALARCHIVES = "mapreduce.job.cache.local.archives";
-
- public static final String CACHE_FILE_TIMESTAMPS = "mapreduce.job.cache.files.timestamps";
-
- public static final String CACHE_ARCHIVES_TIMESTAMPS = "mapreduce.job.cache.archives.timestamps";
-
- public static final String CACHE_FILE_VISIBILITIES = "mapreduce.job.cache.files.visibilities";
-
- public static final String CACHE_ARCHIVES_VISIBILITIES = "mapreduce.job.cache.archives.visibilities";
-
- public static final String CACHE_SYMLINK = "mapreduce.job.cache.symlink.create";
-
- public static final String USER_LOG_RETAIN_HOURS = "mapreduce.job.userlog.retain.hours";
-
- public static final String IO_SORT_FACTOR = "mapreduce.task.io.sort.factor";
-
- public static final String IO_SORT_MB = "mapreduce.task.io.sort.mb";
-
- public static final String INDEX_CACHE_MEMORY_LIMIT = "mapreduce.task.index.cache.limit.bytes";
-
- public static final String PRESERVE_FAILED_TASK_FILES = "mapreduce.task.files.preserve.failedtasks";
-
- public static final String PRESERVE_FILES_PATTERN = "mapreduce.task.files.preserve.filepattern";
-
- public static final String TASK_TEMP_DIR = "mapreduce.task.tmp.dir";
-
- public static final String TASK_DEBUGOUT_LINES = "mapreduce.task.debugout.lines";
-
- public static final String RECORDS_BEFORE_PROGRESS = "mapreduce.task.merge.progress.records";
-
- public static final String SKIP_START_ATTEMPTS = "mapreduce.task.skip.start.attempts";
-
- public static final String TASK_ATTEMPT_ID = "mapreduce.task.attempt.id";
-
- public static final String TASK_ISMAP = "mapreduce.task.ismap";
-
- public static final String TASK_PARTITION = "mapreduce.task.partition";
-
- public static final String TASK_PROFILE = "mapreduce.task.profile";
-
- public static final String TASK_PROFILE_PARAMS = "mapreduce.task.profile.params";
-
- public static final String NUM_MAP_PROFILES = "mapreduce.task.profile.maps";
-
- public static final String NUM_REDUCE_PROFILES = "mapreduce.task.profile.reduces";
-
- public static final String TASK_TIMEOUT = "mapreduce.task.timeout";
-
- public static final String TASK_ID = "mapreduce.task.id";
-
- public static final String TASK_OUTPUT_DIR = "mapreduce.task.output.dir";
-
- public static final String TASK_USERLOG_LIMIT = "mapreduce.task.userlog.limit.kb";
-
- public static final String MAP_SORT_SPILL_PERCENT = "mapreduce.map.sort.spill.percent";
-
- public static final String MAP_INPUT_FILE = "mapreduce.map.input.file";
-
- public static final String MAP_INPUT_PATH = "mapreduce.map.input.length";
-
- public static final String MAP_INPUT_START = "mapreduce.map.input.start";
-
- public static final String MAP_MEMORY_MB = "mapreduce.map.memory.mb";
-
- public static final String MAP_MEMORY_PHYSICAL_MB = "mapreduce.map.memory.physical.mb";
-
- public static final String MAP_ENV = "mapreduce.map.env";
-
- public static final String MAP_JAVA_OPTS = "mapreduce.map.java.opts";
-
- public static final String MAP_ULIMIT = "mapreduce.map.ulimit";
-
- public static final String MAP_MAX_ATTEMPTS = "mapreduce.map.maxattempts";
-
- public static final String MAP_DEBUG_SCRIPT = "mapreduce.map.debug.script";
-
- public static final String MAP_SPECULATIVE = "mapreduce.map.speculative";
-
- public static final String MAP_FAILURES_MAX_PERCENT = "mapreduce.map.failures.maxpercent";
-
- public static final String MAP_SKIP_INCR_PROC_COUNT = "mapreduce.map.skip.proc-count.auto-incr";
-
- public static final String MAP_SKIP_MAX_RECORDS = "mapreduce.map.skip.maxrecords";
-
- public static final String MAP_COMBINE_MIN_SPILLS = "mapreduce.map.combine.minspills";
-
- public static final String MAP_OUTPUT_COMPRESS = "mapreduce.map.output.compress";
-
- public static final String MAP_OUTPUT_COMPRESS_CODEC = "mapreduce.map.output.compress.codec";
-
- public static final String MAP_OUTPUT_KEY_CLASS = "mapreduce.map.output.key.class";
-
- public static final String MAP_OUTPUT_VALUE_CLASS = "mapreduce.map.output.value.class";
-
- public static final String MAP_OUTPUT_KEY_FIELD_SEPERATOR = "mapreduce.map.output.key.field.separator";
-
- public static final String MAP_LOG_LEVEL = "mapreduce.map.log.level";
-
- public static final String REDUCE_LOG_LEVEL = "mapreduce.reduce.log.level";
-
- public static final String REDUCE_MERGE_INMEM_THRESHOLD = "mapreduce.reduce.merge.inmem.threshold";
-
- public static final String REDUCE_INPUT_BUFFER_PERCENT = "mapreduce.reduce.input.buffer.percent";
-
- public static final String REDUCE_MARKRESET_BUFFER_PERCENT = "mapreduce.reduce.markreset.buffer.percent";
-
- public static final String REDUCE_MARKRESET_BUFFER_SIZE = "mapreduce.reduce.markreset.buffer.size";
-
- public static final String REDUCE_MEMORY_PHYSICAL_MB = "mapreduce.reduce.memory.physical.mb";
-
- public static final String REDUCE_MEMORY_MB = "mapreduce.reduce.memory.mb";
-
- public static final String REDUCE_MEMORY_TOTAL_BYTES = "mapreduce.reduce.memory.totalbytes";
-
- public static final String SHUFFLE_INPUT_BUFFER_PERCENT = "mapreduce.reduce.shuffle.input.buffer.percent";
-
- public static final String SHUFFLE_MERGE_EPRCENT = "mapreduce.reduce.shuffle.merge.percent";
-
- public static final String REDUCE_FAILURES_MAXPERCENT = "mapreduce.reduce.failures.maxpercent";
-
- public static final String REDUCE_ENV = "mapreduce.reduce.env";
-
- public static final String REDUCE_JAVA_OPTS = "mapreduce.reduce.java.opts";
-
- public static final String REDUCE_ULIMIT = "mapreduce.reduce.ulimit";
-
- public static final String REDUCE_MAX_ATTEMPTS = "mapreduce.reduce.maxattempts";
-
- public static final String SHUFFLE_PARALLEL_COPIES = "mapreduce.reduce.shuffle.parallelcopies";
-
- public static final String REDUCE_DEBUG_SCRIPT = "mapreduce.reduce.debug.script";
-
- public static final String REDUCE_SPECULATIVE = "mapreduce.reduce.speculative";
-
- public static final String SHUFFLE_CONNECT_TIMEOUT = "mapreduce.reduce.shuffle.connect.timeout";
-
- public static final String SHUFFLE_READ_TIMEOUT = "mapreduce.reduce.shuffle.read.timeout";
-
- public static final String SHUFFLE_FETCH_FAILURES = "mapreduce.reduce.shuffle.maxfetchfailures";
-
- public static final String SHUFFLE_NOTIFY_READERROR = "mapreduce.reduce.shuffle.notify.readerror";
-
- public static final String REDUCE_SKIP_INCR_PROC_COUNT = "mapreduce.reduce.skip.proc-count.auto-incr";
-
- public static final String REDUCE_SKIP_MAXGROUPS = "mapreduce.reduce.skip.maxgroups";
-
- public static final String REDUCE_MEMTOMEM_THRESHOLD = "mapreduce.reduce.merge.memtomem.threshold";
-
- public static final String REDUCE_MEMTOMEM_ENABLED = "mapreduce.reduce.merge.memtomem.enabled";
-
- public static final String COMBINE_RECORDS_BEFORE_PROGRESS = "mapreduce.task.combine.progress.records";
-
- public static final String JOB_NAMENODES = "mapreduce.job.hdfs-servers";
-
- public static final String JOB_JOBTRACKER_ID = "mapreduce.job.kerberos.jtprinicipal";
-
- public static final String JOB_CANCEL_DELEGATION_TOKEN = "mapreduce.job.complete.cancel.delegation.tokens";
-
- public static final String JOB_ACL_VIEW_JOB = "mapreduce.job.acl-view-job";
-
- public static final String JOB_ACL_MODIFY_JOB = "mapreduce.job.acl-modify-job";
- public static final String JOB_SUBMITHOST =
- "mapreduce.job.submithostname";
- public static final String JOB_SUBMITHOSTADDR =
- "mapreduce.job.submithostaddress";
-
- public static final String COUNTERS_MAX_KEY = "mapreduce.job.counters.max";
- public static final int COUNTERS_MAX_DEFAULT = 120;
-
- public static final String COUNTER_GROUP_NAME_MAX_KEY = "mapreduce.job.counters.group.name.max";
- public static final int COUNTER_GROUP_NAME_MAX_DEFAULT = 128;
-
- public static final String COUNTER_NAME_MAX_KEY = "mapreduce.job.counters.counter.name.max";
- public static final int COUNTER_NAME_MAX_DEFAULT = 64;
-
- public static final String COUNTER_GROUPS_MAX_KEY = "mapreduce.job.counters.groups.max";
- public static final int COUNTER_GROUPS_MAX_DEFAULT = 50;
-}
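MRJobConfig is annotated @InterfaceAudience.Private, so user code normally reaches these keys through the public Job API rather than setting them directly. A short sketch of that relationship, assuming the Job.getInstance factory of this branch (the job name and reducer count are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class JobConfigSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJobName("wordcount");   // backed by MRJobConfig.JOB_NAME
        job.setNumReduceTasks(4);      // backed by MRJobConfig.NUM_REDUCES
        // The same values can be read back through the Configuration:
        Configuration conf = job.getConfiguration();
        System.out.println(conf.get(MRJobConfig.JOB_NAME));          // wordcount
        System.out.println(conf.getInt(MRJobConfig.NUM_REDUCES, 1)); // 4
      }
    }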
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
deleted file mode 100644
index 35613f0..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
+++ /dev/null
@@ -1,428 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.filecache;
-
-import java.io.*;
-import java.util.*;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.*;
-import org.apache.hadoop.util.*;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-
-import java.net.URI;
-
-/**
- * Distribute application-specific large, read-only files efficiently.
- *
- * <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
- * framework to cache files (text, archives, jars etc.) needed by applications.
- * </p>
- *
- * <p>Applications specify the files, via urls (hdfs:// or http://) to be cached
- * via the {@link org.apache.hadoop.mapred.JobConf}. The
- * <code>DistributedCache</code> assumes that the files specified via urls are
- * already present on the {@link FileSystem} at the path specified by the url
- * and are accessible by every machine in the cluster.</p>
- *
- * <p>The framework will copy the necessary files on to the slave node before
- * any tasks for the job are executed on that node. Its efficiency stems from
- * the fact that the files are only copied once per job and the ability to
- * cache archives which are un-archived on the slaves.</p>
- *
- * <p><code>DistributedCache</code> can be used to distribute simple, read-only
- * data/text files and/or more complex types such as archives, jars etc.
- * Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
- * Jars may be optionally added to the classpath of the tasks, a rudimentary
- * software distribution mechanism. Files have execution permissions.
- * Optionally users can also direct it to symlink the distributed cache file(s)
- * into the working directory of the task.</p>
- *
- * <p><code>DistributedCache</code> tracks modification timestamps of the cache
- * files. Clearly the cache files should not be modified by the application
- * or externally while the job is executing.</p>
- *
- * <p>Here is an illustrative example on how to use the
- * <code>DistributedCache</code>:</p>
- * <p><blockquote><pre>
- * // Setting up the cache for the application
- *
- * 1. Copy the requisite files to the <code>FileSystem</code>:
- *
- * $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
- * $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
- * $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
- * $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
- * $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
- * $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
- *
- * 2. Set up the application's <code>JobConf</code>:
- *
- * JobConf job = new JobConf();
- * DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
- * job);
- * DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
- * DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
- * DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
- * DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
- * DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
- *
- * 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
- * or {@link org.apache.hadoop.mapred.Reducer}:
- *
- * public static class MapClass extends MapReduceBase
- * implements Mapper<K, V, K, V> {
- *
- * private Path[] localArchives;
- * private Path[] localFiles;
- *
- * public void configure(JobConf job) {
- * // Get the cached archives/files
- * localArchives = DistributedCache.getLocalCacheArchives(job);
- * localFiles = DistributedCache.getLocalCacheFiles(job);
- * }
- *
- * public void map(K key, V value,
- * OutputCollector<K, V> output, Reporter reporter)
- * throws IOException {
- * // Use data from the cached archives/files here
- * // ...
- * // ...
- * output.collect(k, v);
- * }
- * }
- *
- * </pre></blockquote></p>
- *
- * It is also very common to use the DistributedCache by using
- * {@link org.apache.hadoop.util.GenericOptionsParser}.
- *
- * This class includes methods that should be used by users
- * (specifically those mentioned in the example above, as well
- * as {@link DistributedCache#addArchiveToClassPath(Path, Configuration)}),
- * as well as methods intended for use by the MapReduce framework
- * (e.g., {@link org.apache.hadoop.mapred.JobClient}).
- *
- * @see org.apache.hadoop.mapred.JobConf
- * @see org.apache.hadoop.mapred.JobClient
- */
-@Deprecated
-@InterfaceAudience.Private
-public class DistributedCache {
-
- /**
- * Set the configuration with the given set of archives. Intended
- * to be used by user code.
- * @param archives The list of archives that need to be localized
- * @param conf Configuration which will be changed
- * @deprecated Use {@link Job#setCacheArchives(URI[])} instead
- */
- @Deprecated
- public static void setCacheArchives(URI[] archives, Configuration conf) {
- String sarchives = StringUtils.uriToString(archives);
- conf.set(MRJobConfig.CACHE_ARCHIVES, sarchives);
- }
-
- /**
- * Set the configuration with the given set of files. Intended to be
- * used by user code.
- * @param files The list of files that need to be localized
- * @param conf Configuration which will be changed
- * @deprecated Use {@link Job#setCacheFiles(URI[])} instead
- */
- @Deprecated
- public static void setCacheFiles(URI[] files, Configuration conf) {
- String sfiles = StringUtils.uriToString(files);
- conf.set(MRJobConfig.CACHE_FILES, sfiles);
- }
-
- /**
- * Get cache archives set in the Configuration. Used by
- * internal DistributedCache and MapReduce code.
- * @param conf The configuration which contains the archives
- * @return A URI array of the caches set in the Configuration
- * @throws IOException
- * @deprecated Use {@link JobContext#getCacheArchives()} instead
- */
- @Deprecated
- public static URI[] getCacheArchives(Configuration conf) throws IOException {
- return StringUtils.stringToURI(conf.getStrings(MRJobConfig.CACHE_ARCHIVES));
- }
-
- /**
- * Get cache files set in the Configuration. Used by internal
- * DistributedCache and MapReduce code.
- * @param conf The configuration which contains the files
- * @return A URI array of the files set in the Configuration
- * @throws IOException
- * @deprecated Use {@link JobContext#getCacheFiles()} instead
- */
- @Deprecated
- public static URI[] getCacheFiles(Configuration conf) throws IOException {
- return StringUtils.stringToURI(conf.getStrings(MRJobConfig.CACHE_FILES));
- }
-
- /**
- * Return the path array of the localized caches. Intended to be used
- * by user code.
- * @param conf Configuration that contains the localized archives
- * @return A path array of localized caches
- * @throws IOException
- * @deprecated Use {@link JobContext#getLocalCacheArchives()} instead
- */
- @Deprecated
- public static Path[] getLocalCacheArchives(Configuration conf)
- throws IOException {
- return StringUtils.stringToPath(conf
- .getStrings(MRJobConfig.CACHE_LOCALARCHIVES));
- }
-
- /**
- * Return the path array of the localized files. Intended to be used
- * by user code.
- * @param conf Configuration that contains the localized files
- * @return A path array of localized files
- * @throws IOException
- * @deprecated Use {@link JobContext#getLocalCacheFiles()} instead
- */
- @Deprecated
- public static Path[] getLocalCacheFiles(Configuration conf)
- throws IOException {
- return StringUtils.stringToPath(conf.getStrings(MRJobConfig.CACHE_LOCALFILES));
- }
-
- /**
- * Get the timestamps of the archives. Used by internal
- * DistributedCache and MapReduce code.
- * @param conf The configuration which stored the timestamps
- * @return a string array of timestamps
- * @throws IOException
- * @deprecated Use {@link JobContext#getArchiveTimestamps()} instead
- */
- @Deprecated
- public static String[] getArchiveTimestamps(Configuration conf) {
- return conf.getStrings(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS);
- }
-
-
- /**
- * Get the timestamps of the files. Used by internal
- * DistributedCache and MapReduce code.
- * @param conf The configuration which stored the timestamps
- * @return a string array of timestamps
- * @throws IOException
- * @deprecated Use {@link JobContext#getFileTimestamps()} instead
- */
- @Deprecated
- public static String[] getFileTimestamps(Configuration conf) {
- return conf.getStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS);
- }
-
- /**
- * Add an archive to be localized to the conf. Intended to
- * be used by user code.
- * @param uri The uri of the cache to be localized
- * @param conf Configuration to add the cache to
- * @deprecated Use {@link Job#addCacheArchive(URI)} instead
- */
- @Deprecated
- public static void addCacheArchive(URI uri, Configuration conf) {
- String archives = conf.get(MRJobConfig.CACHE_ARCHIVES);
- conf.set(MRJobConfig.CACHE_ARCHIVES, archives == null ? uri.toString()
- : archives + "," + uri.toString());
- }
-
- /**
- * Add a file to be localized to the conf. Intended
- * to be used by user code.
- * @param uri The uri of the cache to be localized
- * @param conf Configuration to add the cache to
- * @deprecated Use {@link Job#addCacheFile(URI)} instead
- */
- @Deprecated
- public static void addCacheFile(URI uri, Configuration conf) {
- String files = conf.get(MRJobConfig.CACHE_FILES);
- conf.set(MRJobConfig.CACHE_FILES, files == null ? uri.toString() : files + ","
- + uri.toString());
- }
-
- /**
- * Add a file path to the current set of classpath entries. It adds the file
- * to the cache as well. Intended to be used by user code.
- *
- * @param file Path of the file to be added
- * @param conf Configuration that contains the classpath setting
- * @deprecated Use {@link Job#addFileToClassPath(Path)} instead
- */
- @Deprecated
- public static void addFileToClassPath(Path file, Configuration conf)
- throws IOException {
- String classpath = conf.get(MRJobConfig.CLASSPATH_FILES);
- conf.set(MRJobConfig.CLASSPATH_FILES, classpath == null ? file.toString()
- : classpath + "," + file.toString());
- FileSystem fs = FileSystem.get(conf);
- URI uri = fs.makeQualified(file).toUri();
-
- addCacheFile(uri, conf);
- }
-
- /**
- * Get the file entries in classpath as an array of Path.
- * Used by internal DistributedCache code.
- *
- * @param conf Configuration that contains the classpath setting
- * @deprecated Use {@link JobContext#getFileClassPaths()} instead
- */
- @Deprecated
- public static Path[] getFileClassPaths(Configuration conf) {
- ArrayList<String> list = (ArrayList<String>)conf.getStringCollection(
- MRJobConfig.CLASSPATH_FILES);
- if (list.size() == 0) {
- return null;
- }
- Path[] paths = new Path[list.size()];
- for (int i = 0; i < list.size(); i++) {
- paths[i] = new Path(list.get(i));
- }
- return paths;
- }
-
- /**
- * Add an archive path to the current set of classpath entries. It adds the
- * archive to cache as well. Intended to be used by user code.
- *
- * @param archive Path of the archive to be added
- * @param conf Configuration that contains the classpath setting
- * @deprecated Use {@link Job#addArchiveToClassPath(Path)} instead
- */
- @Deprecated
- public static void addArchiveToClassPath(Path archive, Configuration conf)
- throws IOException {
- String classpath = conf.get(MRJobConfig.CLASSPATH_ARCHIVES);
- conf.set(MRJobConfig.CLASSPATH_ARCHIVES, classpath == null ? archive
- .toString() : classpath + "," + archive.toString());
- FileSystem fs = FileSystem.get(conf);
- URI uri = fs.makeQualified(archive).toUri();
-
- addCacheArchive(uri, conf);
- }
-
- /**
- * Get the archive entries in classpath as an array of Path.
- * Used by internal DistributedCache code.
- *
- * @param conf Configuration that contains the classpath setting
- * @deprecated Use {@link JobContext#getArchiveClassPaths()} instead
- */
- @Deprecated
- public static Path[] getArchiveClassPaths(Configuration conf) {
- ArrayList<String> list = (ArrayList<String>)conf.getStringCollection(
- MRJobConfig.CLASSPATH_ARCHIVES);
- if (list.size() == 0) {
- return null;
- }
- Path[] paths = new Path[list.size()];
- for (int i = 0; i < list.size(); i++) {
- paths[i] = new Path(list.get(i));
- }
- return paths;
- }
-
- /**
- * This method allows you to create symlinks in the current working directory
- * of the task to all the cache files/archives.
- * Intended to be used by user code.
- * @param conf the jobconf
- * @deprecated Use {@link Job#createSymlink()} instead
- */
- @Deprecated
- public static void createSymlink(Configuration conf){
- conf.set(MRJobConfig.CACHE_SYMLINK, "yes");
- }
-
- /**
- * This method checks to see if symlinks are to be created for the
- * localized cache files in the current working directory.
- * Used by internal DistributedCache code.
- * @param conf the jobconf
- * @return true if symlinks are to be created, false otherwise
- * @deprecated Use {@link JobContext#getSymlink()} instead
- */
- @Deprecated
- public static boolean getSymlink(Configuration conf){
- String result = conf.get(MRJobConfig.CACHE_SYMLINK);
- if ("yes".equals(result)){
- return true;
- }
- return false;
- }
-
- /**
- * This method checks if there is a conflict in the fragment names
- * of the uris. Also makes sure that each uri has a fragment. It
- * is only to be called if you want to create symlinks for
- * the various archives and files. May be used by user code.
- * @param uriFiles The array of file URIs
- * @param uriArchives The array of archive URIs
- * @return false if any URI is missing a fragment or two fragments collide
- */
- public static boolean checkURIs(URI[] uriFiles, URI[] uriArchives) {
- if ((uriFiles == null) && (uriArchives == null)) {
- return true;
- }
- // check if fragment is null for any uri
- // also check if there are any conflicts in fragment names
- Set<String> fragments = new HashSet<String>();
-
- // iterate over file uris
- if (uriFiles != null) {
- for (int i = 0; i < uriFiles.length; i++) {
- String fragment = uriFiles[i].getFragment();
- if (fragment == null) {
- return false;
- }
- String lowerCaseFragment = fragment.toLowerCase();
- if (fragments.contains(lowerCaseFragment)) {
- return false;
- }
- fragments.add(lowerCaseFragment);
- }
- }
-
- // iterate over archive uris
- if (uriArchives != null) {
- for (int i = 0; i < uriArchives.length; i++) {
- String fragment = uriArchives[i].getFragment();
- if (fragment == null) {
- return false;
- }
- String lowerCaseFragment = fragment.toLowerCase();
- if (fragments.contains(lowerCaseFragment)) {
- return false;
- }
- fragments.add(lowerCaseFragment);
- }
- }
- return true;
- }
-
-}
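Every mutator above carries a @deprecated tag pointing at a Job-level replacement. A sketch of the equivalent non-deprecated calls, assuming the Job.getInstance factory (the paths are hypothetical):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;

    public class CacheUsageSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"));
        job.addCacheArchive(new URI("/myapp/map.zip"));
        job.addFileToClassPath(new Path("/myapp/mylib.jar"));
        // Symlink each cache entry into the task working directory.
        job.createSymlink();
      }
    }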
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/AvroArrayUtils.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/AvroArrayUtils.java
deleted file mode 100644
index 99ce903..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/AvroArrayUtils.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.jobhistory;
-
-import java.lang.Integer;
-import java.util.Iterator;
-
-import org.apache.avro.Schema;
-
-import org.apache.avro.generic.GenericArray;
-import org.apache.avro.generic.GenericData;
-
-public class AvroArrayUtils {
-
- private static final Schema ARRAY_INT
- = Schema.createArray(Schema.create(Schema.Type.INT));
-
- public static GenericArray<Integer> NULL_PROGRESS_SPLITS_ARRAY
- = new GenericData.Array<Integer>(0, ARRAY_INT);
-
- public static GenericArray<Integer> toAvro(int[] values) {
- GenericData.Array<Integer> result
- = new GenericData.Array<Integer>(values.length, ARRAY_INT);
-
- for (int i = 0; i < values.length; ++i) {
- result.add(values[i]);
- }
-
- return result;
- }
-
- public static int[] fromAvro(GenericArray<Integer> avro) {
- int[] result = new int[(int)avro.size()];
-
- int i = 0;
-
- for (Iterator<Integer> iter = avro.iterator(); iter.hasNext(); ++i) {
- result[i] = iter.next();
- }
-
- return result;
- }
-}
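A minimal round-trip sketch for the two helpers above, assuming AvroArrayUtils is on the classpath (the values are hypothetical progress splits):

    import java.util.Arrays;
    import org.apache.avro.generic.GenericArray;
    import org.apache.hadoop.mapreduce.jobhistory.AvroArrayUtils;

    public class ProgressSplitsRoundTrip {
      public static void main(String[] args) {
        int[] splits = {10, 25, 50, 100};
        GenericArray<Integer> avro = AvroArrayUtils.toAvro(splits);
        int[] back = AvroArrayUtils.fromAvro(avro);
        System.out.println(Arrays.equals(splits, back)); // true
      }
    }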
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
deleted file mode 100644
index d8516a0..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.jobhistory;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.hadoop.mapreduce.CounterGroup;
-import org.apache.hadoop.mapreduce.Counters;
-
-import org.apache.avro.Schema;
-import org.apache.avro.io.Encoder;
-import org.apache.avro.io.JsonEncoder;
-import org.apache.avro.io.DatumWriter;
-import org.apache.avro.specific.SpecificDatumWriter;
-import org.apache.avro.generic.GenericData;
-import org.apache.avro.util.Utf8;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * EventWriter is a utility class used to write events to the underlying
- * stream. Typically, one event writer (which translates to one stream)
- * is created per job.
- *
- */
-class EventWriter {
- static final String VERSION = "Avro-Json";
-
- private FSDataOutputStream out;
- private DatumWriter<Event> writer =
- new SpecificDatumWriter<Event>(Event.class);
- private Encoder encoder;
- private static final Log LOG = LogFactory.getLog(EventWriter.class);
-
- EventWriter(FSDataOutputStream out) throws IOException {
- this.out = out;
- out.writeBytes(VERSION);
- out.writeBytes("\n");
- out.writeBytes(Event.SCHEMA$.toString());
- out.writeBytes("\n");
- this.encoder = new JsonEncoder(Event.SCHEMA$, out);
- }
-
- synchronized void write(HistoryEvent event) throws IOException {
- Event wrapper = new Event();
- wrapper.type = event.getEventType();
- wrapper.event = event.getDatum();
- writer.write(wrapper, encoder);
- encoder.flush();
- out.writeBytes("\n");
- }
-
- void flush() throws IOException {
- encoder.flush();
- out.flush();
- }
-
- void close() throws IOException {
- try {
- encoder.flush();
- out.close();
- out = null;
- } finally {
- IOUtils.cleanup(LOG, out);
- }
- }
-
- private static final Schema GROUPS =
- Schema.createArray(JhCounterGroup.SCHEMA$);
-
- private static final Schema COUNTERS =
- Schema.createArray(JhCounter.SCHEMA$);
-
- static JhCounters toAvro(Counters counters) {
- return toAvro(counters, "COUNTERS");
- }
- static JhCounters toAvro(Counters counters, String name) {
- JhCounters result = new JhCounters();
- result.name = new Utf8(name);
- result.groups = new GenericData.Array<JhCounterGroup>(0, GROUPS);
- if (counters == null) return result;
- for (CounterGroup group : counters) {
- JhCounterGroup g = new JhCounterGroup();
- g.name = new Utf8(group.getName());
- g.displayName = new Utf8(group.getDisplayName());
- g.counts = new GenericData.Array<JhCounter>(group.size(), COUNTERS);
- for (Counter counter : group) {
- JhCounter c = new JhCounter();
- c.name = new Utf8(counter.getName());
- c.displayName = new Utf8(counter.getDisplayName());
- c.value = counter.getValue();
- g.counts.add(c);
- }
- result.groups.add(g);
- }
- return result;
- }
-
-}
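EventWriter is package-private, so only framework code in org.apache.hadoop.mapreduce.jobhistory can construct it. A sketch of the write path under that assumption (the output path is hypothetical):

    package org.apache.hadoop.mapreduce.jobhistory;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class EventWriterSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        FSDataOutputStream out = fs.create(new Path("/tmp/job_history"));
        // The constructor emits the "Avro-Json" version line and the
        // Event schema before any records are written.
        EventWriter writer = new EventWriter(out);
        // writer.write(event); // one JSON-encoded wrapper per HistoryEvent
        writer.flush();
        writer.close();
      }
    }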
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
deleted file mode 100644
index 895178b..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
+++ /dev/null
@@ -1,797 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapreduce.jobhistory;
-
-import java.io.IOException;
-import java.text.DecimalFormat;
-import java.text.Format;
-import java.text.SimpleDateFormat;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.JobInProgress;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.TaskLogServlet;
-import org.apache.hadoop.mapred.TaskStatus;
-import org.apache.hadoop.mapreduce.CounterGroup;
-import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.mapreduce.TaskID;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
-import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * HistoryViewer is used to parse and view the JobHistory files.
- *
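- * <p>A typical invocation, with a hypothetical history-file path:</p>
- * <p><blockquote><pre>
- * HistoryViewer viewer = new HistoryViewer(
- *     "hdfs:///history/job_201108170001_0001", new Configuration(), true);
- * viewer.print();
- * </pre></blockquote></p>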
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class HistoryViewer {
- private static SimpleDateFormat dateFormat =
- new SimpleDateFormat("d-MMM-yyyy HH:mm:ss");
- private FileSystem fs;
- private JobInfo job;
- private String jobId;
- private boolean printAll;
-
- /**
- * Constructs the HistoryViewer object.
- * @param historyFile The fully qualified Path of the History File
- * @param conf The Configuration file
- * @param printAll Toggle between printing all task statuses and only
- *                 killed/failed statuses
- * @throws IOException
- */
- public HistoryViewer(String historyFile,
- Configuration conf,
- boolean printAll) throws IOException {
- this.printAll = printAll;
- String errorMsg = "Unable to initialize History Viewer";
- try {
- Path jobFile = new Path(historyFile);
- fs = jobFile.getFileSystem(conf);
- String[] jobDetails =
- jobFile.getName().split("_");
- if (jobDetails.length < 2) {
- // NOT a valid name
- System.err.println("Ignore unrecognized file: " + jobFile.getName());
- throw new IOException(errorMsg);
- }
- JobHistoryParser parser = new JobHistoryParser(fs, jobFile);
- job = parser.parse();
- jobId = job.getJobId().toString();
- } catch(Exception e) {
- throw new IOException(errorMsg, e);
- }
- }
-
- /**
- * Print the job/task/attempt summary information
- * @throws IOException
- */
- public void print() throws IOException{
- printJobDetails();
- printTaskSummary();
- printJobAnalysis();
- printTasks(TaskType.JOB_SETUP, TaskStatus.State.FAILED.toString());
- printTasks(TaskType.JOB_SETUP, TaskStatus.State.KILLED.toString());
- printTasks(TaskType.MAP, TaskStatus.State.FAILED.toString());
- printTasks(TaskType.MAP, TaskStatus.State.KILLED.toString());
- printTasks(TaskType.REDUCE, TaskStatus.State.FAILED.toString());
- printTasks(TaskType.REDUCE, TaskStatus.State.KILLED.toString());
- printTasks(TaskType.JOB_CLEANUP, TaskStatus.State.FAILED.toString());
- printTasks(TaskType.JOB_CLEANUP,
- JobStatus.getJobRunState(JobStatus.KILLED));
- if (printAll) {
- printTasks(TaskType.JOB_SETUP, TaskStatus.State.SUCCEEDED.toString());
- printTasks(TaskType.MAP, TaskStatus.State.SUCCEEDED.toString());
- printTasks(TaskType.REDUCE, TaskStatus.State.SUCCEEDED.toString());
- printTasks(TaskType.JOB_CLEANUP, TaskStatus.State.SUCCEEDED.toString());
- printAllTaskAttempts(TaskType.JOB_SETUP);
- printAllTaskAttempts(TaskType.MAP);
- printAllTaskAttempts(TaskType.REDUCE);
- printAllTaskAttempts(TaskType.JOB_CLEANUP);
- }
-
- FilteredJob filter = new FilteredJob(job,
- TaskStatus.State.FAILED.toString());
- printFailedAttempts(filter);
-
- filter = new FilteredJob(job,
- TaskStatus.State.KILLED.toString());
- printFailedAttempts(filter);
- }
-
- private void printJobDetails() {
- StringBuffer jobDetails = new StringBuffer();
- jobDetails.append("\nHadoop job: " ).append(job.getJobId());
- jobDetails.append("\n=====================================");
- jobDetails.append("\nUser: ").append(job.getUsername());
- jobDetails.append("\nJobName: ").append(job.getJobname());
- jobDetails.append("\nJobConf: ").append(job.getJobConfPath());
- jobDetails.append("\nSubmitted At: ").append(StringUtils.
- getFormattedTimeWithDiff(dateFormat,
- job.getSubmitTime(), 0));
- jobDetails.append("\nLaunched At: ").append(StringUtils.
- getFormattedTimeWithDiff(dateFormat,
- job.getLaunchTime(),
- job.getSubmitTime()));
- jobDetails.append("\nFinished At: ").append(StringUtils.
- getFormattedTimeWithDiff(dateFormat,
- job.getFinishTime(),
- job.getLaunchTime()));
- jobDetails.append("\nStatus: ").append(((job.getJobStatus() == null) ?
- "Incomplete" :job.getJobStatus()));
- printCounters(jobDetails, job.getTotalCounters(), job.getMapCounters(),
- job.getReduceCounters());
- jobDetails.append("\n");
- jobDetails.append("\n=====================================");
- System.out.println(jobDetails.toString());
- }
-
- private void printCounters(StringBuffer buff, Counters totalCounters,
- Counters mapCounters, Counters reduceCounters) {
- // Killed jobs might not have counters
- if (totalCounters == null) {
- return;
- }
- buff.append("\nCounters: \n\n");
- buff.append(String.format("|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s|",
- "Group Name",
- "Counter name",
- "Map Value",
- "Reduce Value",
- "Total Value"));
- buff.append("\n------------------------------------------"+
- "---------------------------------------------");
- for (String groupName : totalCounters.getGroupNames()) {
- CounterGroup totalGroup = totalCounters.getGroup(groupName);
- CounterGroup mapGroup = mapCounters.getGroup(groupName);
- CounterGroup reduceGroup = reduceCounters.getGroup(groupName);
-
- Format decimal = new DecimalFormat();
- Iterator<org.apache.hadoop.mapreduce.Counter> ctrItr =
- totalGroup.iterator();
- while(ctrItr.hasNext()) {
- org.apache.hadoop.mapreduce.Counter counter = ctrItr.next();
- String name = counter.getName();
- String mapValue =
- decimal.format(mapGroup.findCounter(name).getValue());
- String reduceValue =
- decimal.format(reduceGroup.findCounter(name).getValue());
- String totalValue =
- decimal.format(counter.getValue());
-
- buff.append(
- String.format("\n|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s",
- totalGroup.getDisplayName(),
- counter.getDisplayName(),
- mapValue, reduceValue, totalValue));
- }
- }
- }
-
- private void printAllTaskAttempts(TaskType taskType) {
- Map<TaskID, TaskInfo> tasks = job.getAllTasks();
- StringBuffer taskList = new StringBuffer();
- taskList.append("\n").append(taskType);
- taskList.append(" task list for ").append(job.getJobId());
- taskList.append("\nTaskId\t\tStartTime");
- if (TaskType.REDUCE.equals(taskType)) {
- taskList.append("\tShuffleFinished\tSortFinished");
- }
- taskList.append("\tFinishTime\tHostName\tError\tTaskLogs");
- taskList.append("\n====================================================");
- System.out.println(taskList.toString());
- for (JobHistoryParser.TaskInfo task : tasks.values()) {
- for (JobHistoryParser.TaskAttemptInfo attempt :
- task.getAllTaskAttempts().values()) {
- if (taskType.equals(task.getTaskType())){
- taskList.setLength(0);
- taskList.append(attempt.getAttemptId()).append("\t");
- taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
- attempt.getStartTime(), 0)).append("\t");
- if (TaskType.REDUCE.equals(taskType)) {
- taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
- attempt.getShuffleFinishTime(),
- attempt.getStartTime()));
- taskList.append("\t");
- taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
- attempt.getSortFinishTime(),
- attempt.getShuffleFinishTime()));
- }
- taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
- attempt.getFinishTime(),
- attempt.getStartTime()));
- taskList.append("\t");
- taskList.append(attempt.getHostname()).append("\t");
- taskList.append(attempt.getError());
- String taskLogsUrl = getTaskLogsUrl(attempt);
- taskList.append(taskLogsUrl != null ? taskLogsUrl : "n/a");
- System.out.println(taskList.toString());
- }
- }
- }
- }
-
- private void printTaskSummary() {
- SummarizedJob ts = new SummarizedJob(job);
- StringBuffer taskSummary = new StringBuffer();
- taskSummary.append("\nTask Summary");
- taskSummary.append("\n============================");
- taskSummary.append("\nKind\tTotal\t");
- taskSummary.append("Successful\tFailed\tKilled\tStartTime\tFinishTime");
- taskSummary.append("\n");
- taskSummary.append("\nSetup\t").append(ts.totalSetups);
- taskSummary.append("\t").append(ts.numFinishedSetups);
- taskSummary.append("\t\t").append(ts.numFailedSetups);
- taskSummary.append("\t").append(ts.numKilledSetups);
- taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, ts.setupStarted, 0));
- taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, ts.setupFinished, ts.setupStarted));
- taskSummary.append("\nMap\t").append(ts.totalMaps);
- taskSummary.append("\t").append(job.getFinishedMaps());
- taskSummary.append("\t\t").append(ts.numFailedMaps);
- taskSummary.append("\t").append(ts.numKilledMaps);
- taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, ts.mapStarted, 0));
- taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, ts.mapFinished, ts.mapStarted));
- taskSummary.append("\nReduce\t").append(ts.totalReduces);
- taskSummary.append("\t").append(job.getFinishedReduces());
- taskSummary.append("\t\t").append(ts.numFailedReduces);
- taskSummary.append("\t").append(ts.numKilledReduces);
- taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, ts.reduceStarted, 0));
- taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, ts.reduceFinished, ts.reduceStarted));
- taskSummary.append("\nCleanup\t").append(ts.totalCleanups);
- taskSummary.append("\t").append(ts.numFinishedCleanups);
- taskSummary.append("\t\t").append(ts.numFailedCleanups);
- taskSummary.append("\t").append(ts.numKilledCleanups);
- taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, ts.cleanupStarted, 0));
- taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, ts.cleanupFinished,
- ts.cleanupStarted));
- taskSummary.append("\n============================\n");
- System.out.println(taskSummary.toString());
- }
-
- private void printJobAnalysis() {
- if (!job.getJobStatus().equals
- (JobStatus.getJobRunState(JobStatus.SUCCEEDED))) {
- System.out.println("No Analysis available as job did not finish");
- return;
- }
-
- AnalyzedJob avg = new AnalyzedJob(job);
-
- System.out.println("\nAnalysis");
- System.out.println("=========");
- printAnalysis(avg.getMapTasks(), cMap, "map", avg.getAvgMapTime(), 10);
- printLast(avg.getMapTasks(), "map", cFinishMapRed);
-
- if (avg.getReduceTasks().length > 0) {
- printAnalysis(avg.getReduceTasks(), cShuffle, "shuffle",
- avg.getAvgShuffleTime(), 10);
- printLast(avg.getReduceTasks(), "shuffle", cFinishShuffle);
-
- printAnalysis(avg.getReduceTasks(), cReduce, "reduce",
- avg.getAvgReduceTime(), 10);
- printLast(avg.getReduceTasks(), "reduce", cFinishMapRed);
- }
- System.out.println("=========");
- }
-
- private void printAnalysis(JobHistoryParser.TaskAttemptInfo [] tasks,
- Comparator<JobHistoryParser.TaskAttemptInfo> cmp,
- String taskType,
- long avg,
- int showTasks) {
- Arrays.sort(tasks, cmp);
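- // cmp orders attempts from slowest to fastest, so the best-performing
- // attempt ends up at tasks[tasks.length - 1].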
- JobHistoryParser.TaskAttemptInfo min = tasks[tasks.length-1];
- StringBuffer details = new StringBuffer();
- details.append("\nTime taken by best performing ");
- details.append(taskType).append(" task ");
- details.append(min.getAttemptId().getTaskID().toString()).append(": ");
- if ("map".equals(taskType)) {
- details.append(StringUtils.formatTimeDiff(
- min.getFinishTime(),
- min.getStartTime()));
- } else if ("shuffle".equals(taskType)) {
- details.append(StringUtils.formatTimeDiff(
- min.getShuffleFinishTime(),
- min.getStartTime()));
- } else {
- details.append(StringUtils.formatTimeDiff(
- min.getFinishTime(),
- min.getShuffleFinishTime()));
- }
- details.append("\nAverage time taken by ");
- details.append(taskType).append(" tasks: ");
- details.append(StringUtils.formatTimeDiff(avg, 0));
- details.append("\nWorse performing ");
- details.append(taskType).append(" tasks: ");
- details.append("\nTaskId\t\tTimetaken");
- System.out.println(details.toString());
- for (int i = 0; i < showTasks && i < tasks.length; i++) {
- details.setLength(0);
- details.append(tasks[i].getAttemptId().getTaskID()).append(" ");
- if ("map".equals(taskType)) {
- details.append(StringUtils.formatTimeDiff(
- tasks[i].getFinishTime(),
- tasks[i].getStartTime()));
- } else if ("shuffle".equals(taskType)) {
- details.append(StringUtils.formatTimeDiff(
- tasks[i].getShuffleFinishTime(),
- tasks[i].getStartTime()));
- } else {
- details.append(StringUtils.formatTimeDiff(
- tasks[i].getFinishTime(),
- tasks[i].getShuffleFinishTime()));
- }
- System.out.println(details.toString());
- }
- }
-
- private void printLast(JobHistoryParser.TaskAttemptInfo [] tasks,
- String taskType,
- Comparator<JobHistoryParser.TaskAttemptInfo> cmp
- ) {
- Arrays.sort(tasks, cmp);
- JobHistoryParser.TaskAttemptInfo last = tasks[0];
- StringBuffer lastBuf = new StringBuffer();
- lastBuf.append("The last ").append(taskType);
- lastBuf.append(" task ").append(last.getAttemptId().getTaskID());
- Long finishTime;
- if ("shuffle".equals(taskType)) {
- finishTime = last.getShuffleFinishTime();
- } else {
- finishTime = last.getFinishTime();
- }
- lastBuf.append(" finished at (relative to the Job launch time): ");
- lastBuf.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
- finishTime, job.getLaunchTime()));
- System.out.println(lastBuf.toString());
- }
-
- private void printTasks(TaskType taskType, String status) {
- Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
- StringBuffer header = new StringBuffer();
- header.append("\n").append(status).append(" ");
- header.append(taskType).append(" task list for ").append(jobId);
- header.append("\nTaskId\t\tStartTime\tFinishTime\tError");
- if (TaskType.MAP.equals(taskType)) {
- header.append("\tInputSplits");
- }
- header.append("\n====================================================");
- StringBuffer taskList = new StringBuffer();
- for (JobHistoryParser.TaskInfo task : tasks.values()) {
- if (taskType.equals(task.getTaskType()) &&
- (status.equals(task.getTaskStatus())
- || status.equalsIgnoreCase("ALL"))) {
- taskList.setLength(0);
- taskList.append(task.getTaskId());
- taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, task.getStartTime(), 0));
- taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
- dateFormat, task.getFinishTime(),
- task.getStartTime()));
- taskList.append("\t").append(task.getError());
- if (TaskType.MAP.equals(taskType)) {
- taskList.append("\t").append(task.getSplitLocations());
- }
- System.out.println(header.toString());
- System.out.println(taskList.toString());
- }
- }
- }
-
- private void printFailedAttempts(FilteredJob filteredJob) {
- Map<String, Set<TaskID>> badNodes = filteredJob.getFilteredMap();
- StringBuffer attempts = new StringBuffer();
- if (badNodes.size() > 0) {
- attempts.append("\n").append(filteredJob.getFilter());
- attempts.append(" task attempts by nodes");
- attempts.append("\nHostname\tFailedTasks");
- attempts.append("\n===============================");
- System.out.println(attempts.toString());
- for (Map.Entry<String,
- Set<TaskID>> entry : badNodes.entrySet()) {
- String node = entry.getKey();
- Set<TaskID> failedTasks = entry.getValue();
- attempts.setLength(0);
- attempts.append(node).append("\t");
- for (TaskID t : failedTasks) {
- attempts.append(t).append(", ");
- }
- System.out.println(attempts.toString());
- }
- }
- }
-
- /**
- * Return the TaskLogsUrl of a particular TaskAttempt
- *
- * @param attempt
- * @return the taskLogsUrl. null if http-port or tracker-name or
- * task-attempt-id are unavailable.
- */
- public static String getTaskLogsUrl(
- JobHistoryParser.TaskAttemptInfo attempt) {
- if (attempt.getHttpPort() == -1
- || attempt.getTrackerName().equals("")
- || attempt.getAttemptId() == null) {
- return null;
- }
-
- String taskTrackerName =
- JobInProgress.convertTrackerNameToHostName(
- attempt.getTrackerName());
- return TaskLogServlet.getTaskLogUrl(taskTrackerName,
- Integer.toString(attempt.getHttpPort()),
- attempt.getAttemptId().toString());
- }
-
- private Comparator<JobHistoryParser.TaskAttemptInfo> cMap =
- new Comparator<JobHistoryParser.TaskAttemptInfo>() {
- public int compare(JobHistoryParser.TaskAttemptInfo t1,
- JobHistoryParser.TaskAttemptInfo t2) {
- long l1 = t1.getFinishTime() - t1.getStartTime();
- long l2 = t2.getFinishTime() - t2.getStartTime();
- return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
- }
- };
-
- private Comparator<JobHistoryParser.TaskAttemptInfo> cShuffle =
- new Comparator<JobHistoryParser.TaskAttemptInfo>() {
- public int compare(JobHistoryParser.TaskAttemptInfo t1,
- JobHistoryParser.TaskAttemptInfo t2) {
- long l1 = t1.getShuffleFinishTime() - t1.getStartTime();
- long l2 = t2.getShuffleFinishTime() - t2.getStartTime();
- return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
- }
- };
-
- private Comparator<JobHistoryParser.TaskAttemptInfo> cFinishShuffle =
- new Comparator<JobHistoryParser.TaskAttemptInfo>() {
- public int compare(JobHistoryParser.TaskAttemptInfo t1,
- JobHistoryParser.TaskAttemptInfo t2) {
- long l1 = t1.getShuffleFinishTime();
- long l2 = t2.getShuffleFinishTime();
- return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
- }
- };
-
- private Comparator<JobHistoryParser.TaskAttemptInfo> cFinishMapRed =
- new Comparator<JobHistoryParser.TaskAttemptInfo>() {
- public int compare(JobHistoryParser.TaskAttemptInfo t1,
- JobHistoryParser.TaskAttemptInfo t2) {
- long l1 = t1.getFinishTime();
- long l2 = t2.getFinishTime();
- return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
- }
- };
-
- private Comparator<JobHistoryParser.TaskAttemptInfo> cReduce =
- new Comparator<JobHistoryParser.TaskAttemptInfo>() {
- public int compare(JobHistoryParser.TaskAttemptInfo t1,
- JobHistoryParser.TaskAttemptInfo t2) {
- long l1 = t1.getFinishTime() -
- t1.getShuffleFinishTime();
- long l2 = t2.getFinishTime() -
- t2.getShuffleFinishTime();
- return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
- }
- };
-
- /**
- * Utility class used to summarize the job.
- * Used by HistoryViewer and the JobHistory UI.
- *
- */
- public static class SummarizedJob {
- Map<TaskID, JobHistoryParser.TaskInfo> tasks;
- int totalMaps = 0;
- int totalReduces = 0;
- int totalCleanups = 0;
- int totalSetups = 0;
- int numFailedMaps = 0;
- int numKilledMaps = 0;
- int numFailedReduces = 0;
- int numKilledReduces = 0;
- int numFinishedCleanups = 0;
- int numFailedCleanups = 0;
- int numKilledCleanups = 0;
- int numFinishedSetups = 0;
- int numFailedSetups = 0;
- int numKilledSetups = 0;
- long mapStarted = 0;
- long mapFinished = 0;
- long reduceStarted = 0;
- long reduceFinished = 0;
- long cleanupStarted = 0;
- long cleanupFinished = 0;
- long setupStarted = 0;
- long setupFinished = 0;
-
- /** Get total maps */
- public int getTotalMaps() { return totalMaps; }
- /** Get total reduces */
- public int getTotalReduces() { return totalReduces; }
- /** Get number of clean up tasks */
- public int getTotalCleanups() { return totalCleanups; }
- /** Get number of set up tasks */
- public int getTotalSetups() { return totalSetups; }
- /** Get number of failed maps */
- public int getNumFailedMaps() { return numFailedMaps; }
- /** Get number of killed maps */
- public int getNumKilledMaps() { return numKilledMaps; }
- /** Get number of failed reduces */
- public int getNumFailedReduces() { return numFailedReduces; }
- /** Get number of killed reduces */
- public int getNumKilledReduces() { return numKilledReduces; }
- /** Get number of cleanup tasks that finished */
- public int getNumFinishedCleanups() { return numFinishedCleanups; }
- /** Get number of failed cleanup tasks */
- public int getNumFailedCleanups() { return numFailedCleanups; }
- /** Get number of killed cleanup tasks */
- public int getNumKilledCleanups() { return numKilledCleanups; }
- /** Get number of finished set up tasks */
- public int getNumFinishedSetups() { return numFinishedSetups; }
- /** Get number of failed set up tasks */
- public int getNumFailedSetups() { return numFailedSetups; }
- /** Get number of killed set up tasks */
- public int getNumKilledSetups() { return numKilledSetups; }
- /** Get the time the first map attempt started */
- public long getMapStarted() { return mapStarted; }
- /** Get the time the last map attempt finished */
- public long getMapFinished() { return mapFinished; }
- /** Get the time the first reduce attempt started */
- public long getReduceStarted() { return reduceStarted; }
- /** Get the time the last reduce attempt finished */
- public long getReduceFinished() { return reduceFinished; }
- /** Get the time the first cleanup attempt started */
- public long getCleanupStarted() { return cleanupStarted; }
- /** Get the time the last cleanup attempt finished */
- public long getCleanupFinished() { return cleanupFinished; }
- /** Get the time the first setup attempt started */
- public long getSetupStarted() { return setupStarted; }
- /** Get the time the last setup attempt finished */
- public long getSetupFinished() { return setupFinished; }
-
- /** Create summary information for the parsed job */
- public SummarizedJob(JobInfo job) {
- tasks = job.getAllTasks();
-
- for (JobHistoryParser.TaskInfo task : tasks.values()) {
- Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
- task.getAllTaskAttempts();
- //allHosts.put(task.getHo(Keys.HOSTNAME), "");
- for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
- long startTime = attempt.getStartTime();
- long finishTime = attempt.getFinishTime();
- if (attempt.getTaskType().equals(TaskType.MAP)) {
- if (mapStarted== 0 || mapStarted > startTime) {
- mapStarted = startTime;
- }
- if (mapFinished < finishTime) {
- mapFinished = finishTime;
- }
- totalMaps++;
- if (attempt.getTaskStatus().equals
- (TaskStatus.State.FAILED.toString())) {
- numFailedMaps++;
- } else if (attempt.getTaskStatus().equals
- (TaskStatus.State.KILLED.toString())) {
- numKilledMaps++;
- }
- } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
- if (reduceStarted==0||reduceStarted > startTime) {
- reduceStarted = startTime;
- }
- if (reduceFinished < finishTime) {
- reduceFinished = finishTime;
- }
- totalReduces++;
- if (attempt.getTaskStatus().equals
- (TaskStatus.State.FAILED.toString())) {
- numFailedReduces++;
- } else if (attempt.getTaskStatus().equals
- (TaskStatus.State.KILLED.toString())) {
- numKilledReduces++;
- }
- } else if (attempt.getTaskType().equals(TaskType.JOB_CLEANUP)) {
- if (cleanupStarted == 0 || cleanupStarted > startTime) {
- cleanupStarted = startTime;
- }
- if (cleanupFinished < finishTime) {
- cleanupFinished = finishTime;
- }
- totalCleanups++;
- if (attempt.getTaskStatus().equals
- (TaskStatus.State.SUCCEEDED.toString())) {
- numFinishedCleanups++;
- } else if (attempt.getTaskStatus().equals
- (TaskStatus.State.FAILED.toString())) {
- numFailedCleanups++;
- } else if (attempt.getTaskStatus().equals
- (TaskStatus.State.KILLED.toString())) {
- numKilledCleanups++;
- }
- } else if (attempt.getTaskType().equals(TaskType.JOB_SETUP)) {
- if (setupStarted == 0 || setupStarted > startTime) {
- setupStarted = startTime;
- }
- if (setupFinished < finishTime) {
- setupFinished = finishTime;
- }
- totalSetups++;
- if (attempt.getTaskStatus().equals
- (TaskStatus.State.SUCCEEDED.toString())) {
- numFinishedSetups++;
- } else if (attempt.getTaskStatus().equals
- (TaskStatus.State.FAILED.toString())) {
- numFailedSetups++;
- } else if (attempt.getTaskStatus().equals
- (TaskStatus.State.KILLED.toString())) {
- numKilledSetups++;
- }
- }
- }
- }
- }
- }
-
- /**
- * Utility class used while analyzing the job.
- * Used by HistoryViewer and the JobHistory UI.
- */
-
- public static class AnalyzedJob {
- private long avgMapTime;
- private long avgReduceTime;
- private long avgShuffleTime;
-
- private JobHistoryParser.TaskAttemptInfo [] mapTasks;
- private JobHistoryParser.TaskAttemptInfo [] reduceTasks;
-
- /** Get the average map time */
- public long getAvgMapTime() { return avgMapTime; }
- /** Get the average reduce time */
- public long getAvgReduceTime() { return avgReduceTime; }
- /** Get the average shuffle time */
- public long getAvgShuffleTime() { return avgShuffleTime; }
- /** Get the map tasks list */
- public JobHistoryParser.TaskAttemptInfo [] getMapTasks() {
- return mapTasks;
- }
- /** Get the reduce tasks list */
- public JobHistoryParser.TaskAttemptInfo [] getReduceTasks() {
- return reduceTasks;
- }
- /** Generate analysis information for the parsed job */
- public AnalyzedJob(JobInfo job) {
- Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
- int finishedMaps = (int) job.getFinishedMaps();
- int finishedReduces = (int) job.getFinishedReduces();
- mapTasks =
- new JobHistoryParser.TaskAttemptInfo[finishedMaps];
- reduceTasks =
- new JobHistoryParser.TaskAttemptInfo[finishedReduces];
- int mapIndex = 0, reduceIndex = 0;
- avgMapTime = 0;
- avgReduceTime = 0;
- avgShuffleTime = 0;
-
- for (JobHistoryParser.TaskInfo task : tasks.values()) {
- Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
- task.getAllTaskAttempts();
- for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
- if (attempt.getTaskStatus().
- equals(TaskStatus.State.SUCCEEDED.toString())) {
- long avgFinishTime = (attempt.getFinishTime() -
- attempt.getStartTime());
- if (attempt.getTaskType().equals(TaskType.MAP)) {
- mapTasks[mapIndex++] = attempt;
- avgMapTime += avgFinishTime;
- } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
- reduceTasks[reduceIndex++] = attempt;
- avgShuffleTime += (attempt.getShuffleFinishTime() -
- attempt.getStartTime());
- avgReduceTime += (attempt.getFinishTime() -
- attempt.getShuffleFinishTime());
- }
- break;
- }
- }
- }
- if (finishedMaps > 0) {
- avgMapTime /= finishedMaps;
- }
- if (finishedReduces > 0) {
- avgReduceTime /= finishedReduces;
- avgShuffleTime /= finishedReduces;
- }
- }
- }
-
- /**
- * Utility to filter out events based on the task status
- *
- */
- public static class FilteredJob {
-
- private Map<String, Set<TaskID>> badNodesToFilteredTasks =
- new HashMap<String, Set<TaskID>>();
-
- private String filter;
-
- /** Get the map of the filtered tasks */
- public Map<String, Set<TaskID>> getFilteredMap() {
- return badNodesToFilteredTasks;
- }
-
- /** Get the current filter */
- public String getFilter() { return filter; }
-
- /** Apply the filter (status) on the parsed job and generate summary */
- public FilteredJob(JobInfo job, String status) {
-
- filter = status;
-
- Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
-
- for (JobHistoryParser.TaskInfo task : tasks.values()) {
- Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
- task.getAllTaskAttempts();
- for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
- if (attempt.getTaskStatus().equals(status)) {
- String hostname = attempt.getHostname();
- TaskID id = attempt.getAttemptId().getTaskID();
-
- Set<TaskID> set = badNodesToFilteredTasks.get(hostname);
-
- if (set == null) {
- set = new TreeSet<TaskID>();
- set.add(id);
- badNodesToFilteredTasks.put(hostname, set);
- } else {
- set.add(id);
- }
- }
- }
- }
- }
- }
-}
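
For reference, the three nested helpers removed above all consume a parsed
JobHistoryParser.JobInfo. Below is a minimal sketch of driving them from the
enclosing HistoryViewer class, assuming a job history file path is passed as
the first argument (the demo class name and argument handling are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.HistoryViewer;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

public class HistorySummaryDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path historyFile = new Path(args[0]);  // path to a job history file
    JobHistoryParser parser =
        new JobHistoryParser(FileSystem.get(conf), historyFile);
    JobInfo job = parser.parse();

    // One pass over all task attempts: counts plus first/last times.
    HistoryViewer.SummarizedJob sj = new HistoryViewer.SummarizedJob(job);
    System.out.println("maps=" + sj.getTotalMaps()
        + " failed maps=" + sj.getNumFailedMaps());

    // Averages over successful attempts only.
    HistoryViewer.AnalyzedJob aj = new HistoryViewer.AnalyzedJob(job);
    System.out.println("avg map time (ms)=" + aj.getAvgMapTime());

    // Group FAILED attempts by the host that ran them.
    HistoryViewer.FilteredJob fj =
        new HistoryViewer.FilteredJob(job, "FAILED");
    System.out.println("nodes with failures: " + fj.getFilteredMap().keySet());
  }
}
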
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobSubmittedEvent.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobSubmittedEvent.java
deleted file mode 100644
index b1785e0..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/jobhistory/JobSubmittedEvent.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.jobhistory;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.mapreduce.JobACL;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.security.authorize.AccessControlList;
-
-import org.apache.avro.util.Utf8;
-
-/**
- * Event to record the submission of a job
- *
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class JobSubmittedEvent implements HistoryEvent {
- private JobSubmitted datum = new JobSubmitted();
-
- /**
- * Create an event to record job submission
- * @param id The job Id of the job
- * @param jobName Name of the job
- * @param userName Name of the user who submitted the job
- * @param submitTime Time of submission
- * @param jobConfPath Path of the Job Configuration file
- * @param jobACLs The configured acls for the job.
- * @param jobQueueName The job-queue to which this job was submitted to
- */
- public JobSubmittedEvent(JobID id, String jobName, String userName,
- long submitTime, String jobConfPath,
- Map<JobACL, AccessControlList> jobACLs, String jobQueueName) {
- datum.jobid = new Utf8(id.toString());
- datum.jobName = new Utf8(jobName);
- datum.userName = new Utf8(userName);
- datum.submitTime = submitTime;
- datum.jobConfPath = new Utf8(jobConfPath);
- Map<Utf8, Utf8> jobAcls = new HashMap<Utf8, Utf8>();
- for (Entry<JobACL, AccessControlList> entry : jobACLs.entrySet()) {
- jobAcls.put(new Utf8(entry.getKey().getAclName()), new Utf8(
- entry.getValue().getAclString()));
- }
- datum.acls = jobAcls;
- if (jobQueueName != null) {
- datum.jobQueueName = new Utf8(jobQueueName);
- }
- }
-
- JobSubmittedEvent() {}
-
- public Object getDatum() { return datum; }
- public void setDatum(Object datum) {
- this.datum = (JobSubmitted)datum;
- }
-
- /** Get the Job Id */
- public JobID getJobId() { return JobID.forName(datum.jobid.toString()); }
- /** Get the Job name */
- public String getJobName() { return datum.jobName.toString(); }
- /** Get the Job queue name */
- public String getJobQueueName() {
- if (datum.jobQueueName != null) {
- return datum.jobQueueName.toString();
- }
- return null;
- }
- /** Get the user name */
- public String getUserName() { return datum.userName.toString(); }
- /** Get the submit time */
- public long getSubmitTime() { return datum.submitTime; }
- /** Get the Path for the Job Configuration file */
- public String getJobConfPath() { return datum.jobConfPath.toString(); }
- /** Get the acls configured for the job */
- public Map<JobACL, AccessControlList> getJobAcls() {
- Map<JobACL, AccessControlList> jobAcls =
- new HashMap<JobACL, AccessControlList>();
- for (JobACL jobACL : JobACL.values()) {
- Utf8 jobACLsUtf8 = new Utf8(jobACL.getAclName());
- if (datum.acls.containsKey(jobACLsUtf8)) {
- jobAcls.put(jobACL, new AccessControlList(datum.acls.get(
- jobACLsUtf8).toString()));
- }
- }
- return jobAcls;
- }
- /** Get the event type */
- public EventType getEventType() { return EventType.JOB_SUBMITTED; }
-
-}
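
The event above is written once per job at submission time. A minimal sketch
of constructing one and reading it back through its accessors, with
hypothetical job id, user, queue, and staging-path values:

import java.util.Collections;
import java.util.Map;

import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
import org.apache.hadoop.security.authorize.AccessControlList;

public class SubmitEventDemo {
  public static void main(String[] args) {
    JobID id = JobID.forName("job_201108150000_0001");
    // Grant view access to two (hypothetical) users.
    Map<JobACL, AccessControlList> acls = Collections.singletonMap(
        JobACL.VIEW_JOB, new AccessControlList("alice,bob"));
    JobSubmittedEvent event = new JobSubmittedEvent(id, "wordcount",
        "alice", System.currentTimeMillis(),
        "hdfs://nn:8020/user/alice/.staging/job_201108150000_0001/job.xml",
        acls, "default");
    // The accessors mirror the constructor arguments.
    System.out.println(event.getEventType() + " " + event.getJobId()
        + " queue=" + event.getJobQueueName()
        + " acls=" + event.getJobAcls().keySet());
  }
}
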
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
deleted file mode 100644
index 2c2b4d5..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
+++ /dev/null
@@ -1,366 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.protocol;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.VersionedProtocol;
-import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
-import org.apache.hadoop.mapreduce.ClusterMetrics;
-import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.JobStatus;
-import org.apache.hadoop.mapreduce.QueueAclsInfo;
-import org.apache.hadoop.mapreduce.QueueInfo;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.mapreduce.TaskCompletionEvent;
-import org.apache.hadoop.mapreduce.TaskReport;
-import org.apache.hadoop.mapreduce.TaskTrackerInfo;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.mapreduce.server.jobtracker.State;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenInfo;
-
-/**
- * Protocol that a JobClient and the central JobTracker use to communicate. The
- * JobClient can use these methods to submit a Job for execution, and learn about
- * the current system status.
- */
-@KerberosInfo(
- serverPrincipal = JTConfig.JT_USER_NAME)
-@TokenInfo(DelegationTokenSelector.class)
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public interface ClientProtocol extends VersionedProtocol {
- /*
- * Changing the versionID to 2L since the getTaskCompletionEvents method has
- * changed.
- * Changed to 4 since killTask(String,boolean) is added
- *Version 4: added jobtracker state to ClusterStatus
- *Version 5: max_tasks in ClusterStatus is replaced by
- * max_map_tasks and max_reduce_tasks for HADOOP-1274
- * Version 6: change the counters representation for HADOOP-2248
- * Version 7: added getAllJobs for HADOOP-2487
- * Version 8: change {job|task}id's to use corresponding objects rather than strings.
- * Version 9: change the counter representation for HADOOP-1915
- * Version 10: added getSystemDir for HADOOP-3135
- * Version 11: changed JobProfile to include the queue name for HADOOP-3698
- * Version 12: Added getCleanupTaskReports and
- * cleanupProgress to JobStatus as part of HADOOP-3150
- * Version 13: Added getJobQueueInfos and getJobQueueInfo(queue name)
- * and getAllJobs(queue) as a part of HADOOP-3930
- * Version 14: Added setPriority for HADOOP-4124
- * Version 15: Added KILLED status to JobStatus as part of HADOOP-3924
- * Version 16: Added getSetupTaskReports and
- * setupProgress to JobStatus as part of HADOOP-4261
- * Version 17: getClusterStatus returns the amount of memory used by
- * the server. HADOOP-4435
- * Version 18: Added blacklisted trackers to the ClusterStatus
- * for HADOOP-4305
- * Version 19: Modified TaskReport to have TIP status and modified the
- * method getClusterStatus() to take a boolean argument
- * for HADOOP-4807
- * Version 20: Modified ClusterStatus to have the tasktracker expiry
- * interval for HADOOP-4939
- * Version 21: Modified TaskID to be aware of the new TaskTypes
- * Version 22: Added method getQueueAclsForCurrentUser to get queue acls info
- * for a user
- * Version 23: Modified the JobQueueInfo class to include queue state.
- * Part of HADOOP-5913.
- * Version 24: Modified ClusterStatus to include BlackListInfo class which
- * encapsulates reasons and report for blacklisted node.
- * Version 25: Added fields to JobStatus for HADOOP-817.
- * Version 26: Added properties to JobQueueInfo as part of MAPREDUCE-861.
- * added new api's getRootQueues and
- * getChildQueues(String queueName)
- * Version 27: Changed protocol to use new api objects. And the protocol is
- * renamed from JobSubmissionProtocol to ClientProtocol.
- * Version 28: Added getJobHistoryDir() as part of MAPREDUCE-975.
- * Version 29: Added reservedSlots, runningTasks and totalJobSubmissions
- * to ClusterMetrics as part of MAPREDUCE-1048.
- * Version 30: Job submission files are uploaded to a staging area under
- * user home dir. JobTracker reads the required files from the
- * staging area using user credentials passed via the rpc.
- * Version 31: Added TokenStorage to submitJob
- * Version 32: Added delegation tokens (add, renew, cancel)
- * Version 33: Added JobACLs to JobStatus as part of MAPREDUCE-1307
- * Version 34: Modified submitJob to use Credentials instead of TokenStorage.
- * Version 35: Added the method getQueueAdmins(queueName) as part of
- * MAPREDUCE-1664.
- * Version 36: Added the method getJobTrackerStatus() as part of
- * MAPREDUCE-2337.
- * Version 37: More efficient serialization format for framework counters
- * (MAPREDUCE-901)
- */
- public static final long versionID = 37L;
-
- /**
- * Allocate a name for the job.
- * @return a unique job name for submitting jobs.
- * @throws IOException
- */
- public JobID getNewJobID() throws IOException, InterruptedException;
-
- /**
- * Submit a Job for execution. Returns the latest profile for
- * that job.
- */
- public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
- throws IOException, InterruptedException;
-
- /**
- * Get the current status of the cluster
- *
- * @return summary of the state of the cluster
- */
- public ClusterMetrics getClusterMetrics()
- throws IOException, InterruptedException;
-
- /**
- * Get JobTracker's state
- *
- * @return {@link State} of the JobTracker
- * @throws IOException
- * @throws InterruptedException
- * @deprecated Use {@link #getJobTrackerStatus()} instead.
- */
- @Deprecated
- public State getJobTrackerState() throws IOException, InterruptedException;
-
- /**
- * Get the JobTracker's status.
- *
- * @return {@link JobTrackerStatus} of the JobTracker
- * @throws IOException
- * @throws InterruptedException
- */
- public JobTrackerStatus getJobTrackerStatus() throws IOException,
- InterruptedException;
-
- public long getTaskTrackerExpiryInterval() throws IOException,
- InterruptedException;
-
- /**
- * Get the administrators of the given job-queue.
- * This method is for hadoop internal use only.
- * @param queueName
- * @return Queue administrators ACL for the queue to which job is
- * submitted to
- * @throws IOException
- */
- public AccessControlList getQueueAdmins(String queueName) throws IOException;
-
- /**
- * Kill the indicated job
- */
- public void killJob(JobID jobid) throws IOException, InterruptedException;
-
- /**
- * Set the priority of the specified job
- * @param jobid ID of the job
- * @param priority Priority to be set for the job
- */
- public void setJobPriority(JobID jobid, String priority)
- throws IOException, InterruptedException;
-
- /**
- * Kill indicated task attempt.
- * @param taskId the id of the task to kill.
- * @param shouldFail if true the task is failed and added to failed tasks list, otherwise
- * it is just killed, w/o affecting job failure status.
- */
- public boolean killTask(TaskAttemptID taskId, boolean shouldFail)
- throws IOException, InterruptedException;
-
- /**
- * Grab a handle to a job that is already known to the JobTracker.
- * @return Status of the job, or null if not found.
- */
- public JobStatus getJobStatus(JobID jobid)
- throws IOException, InterruptedException;
-
- /**
- * Grab the current job counters
- */
- public Counters getJobCounters(JobID jobid)
- throws IOException, InterruptedException;
-
- /**
- * Grab a bunch of info on the tasks that make up the job
- */
- public TaskReport[] getTaskReports(JobID jobid, TaskType type)
- throws IOException, InterruptedException;
-
- /**
- * A MapReduce system always operates on a single filesystem. This
- * function returns the fs name. ('local' if the localfs; 'addr:port'
- * if dfs). The client can then copy files into the right locations
- * prior to submitting the job.
- */
- public String getFilesystemName() throws IOException, InterruptedException;
-
- /**
- * Get all the jobs submitted.
- * @return array of JobStatus for the submitted jobs
- */
- public JobStatus[] getAllJobs() throws IOException, InterruptedException;
-
- /**
- * Get task completion events for the jobid, starting from fromEventId.
- * Returns empty array if no events are available.
- * @param jobid job id
- * @param fromEventId event id to start from.
- * @param maxEvents the max number of events we want to look at
- * @return array of task completion events.
- * @throws IOException
- */
- public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid,
- int fromEventId, int maxEvents) throws IOException, InterruptedException;
-
- /**
- * Get the diagnostics for a given task in a given job
- * @param taskId the id of the task
- * @return an array of the diagnostic messages
- */
- public String[] getTaskDiagnostics(TaskAttemptID taskId)
- throws IOException, InterruptedException;
-
- /**
- * Get all active trackers in cluster.
- * @return array of TaskTrackerInfo
- */
- public TaskTrackerInfo[] getActiveTrackers()
- throws IOException, InterruptedException;
-
- /**
- * Get all blacklisted trackers in cluster.
- * @return array of TaskTrackerInfo
- */
- public TaskTrackerInfo[] getBlacklistedTrackers()
- throws IOException, InterruptedException;
-
- /**
- * Grab the jobtracker system directory path
- * where job-specific files are to be placed.
- *
- * @return the system directory where job-specific files are to be placed.
- */
- public String getSystemDir() throws IOException, InterruptedException;
-
- /**
- * Get a hint from the JobTracker
- * where job-specific files are to be placed.
- *
- * @return the directory where job-specific files are to be placed.
- */
- public String getStagingAreaDir() throws IOException, InterruptedException;
-
- /**
- * Gets the directory location of the completed job history files.
- * @throws IOException
- * @throws InterruptedException
- */
- public String getJobHistoryDir()
- throws IOException, InterruptedException;
-
- /**
- * Gets set of Queues associated with the Job Tracker
- *
- * @return Array of the Queue Information Object
- * @throws IOException
- */
- public QueueInfo[] getQueues() throws IOException, InterruptedException;
-
- /**
- * Gets scheduling information associated with the particular Job queue
- *
- * @param queueName Queue Name
- * @return Scheduling Information of the Queue
- * @throws IOException
- */
- public QueueInfo getQueue(String queueName)
- throws IOException, InterruptedException;
-
- /**
- * Gets the Queue ACLs for current user
- * @return array of QueueAclsInfo object for current user.
- * @throws IOException
- */
- public QueueAclsInfo[] getQueueAclsForCurrentUser()
- throws IOException, InterruptedException;
-
- /**
- * Gets the root level queues.
- * @return array of JobQueueInfo object.
- * @throws IOException
- */
- public QueueInfo[] getRootQueues() throws IOException, InterruptedException;
-
- /**
- * Returns immediate children of queueName.
- * @param queueName
- * @return array of JobQueueInfo which are children of queueName
- * @throws IOException
- */
- public QueueInfo[] getChildQueues(String queueName)
- throws IOException, InterruptedException;
-
- /**
- * Get a new delegation token.
- * @param renewer the user other than the creator (if any) that can renew the
- * token
- * @return the new delegation token
- * @throws IOException
- * @throws InterruptedException
- */
- public
- Token<DelegationTokenIdentifier> getDelegationToken(Text renewer
- ) throws IOException,
- InterruptedException;
-
- /**
- * Renew an existing delegation token
- * @param token the token to renew
- * @return the new expiration time
- * @throws IOException
- * @throws InterruptedException
- */
- public long renewDelegationToken(Token<DelegationTokenIdentifier> token
- ) throws IOException,
- InterruptedException;
-
- /**
- * Cancel a delegation token.
- * @param token the token to cancel
- * @throws IOException
- * @throws InterruptedException
- */
- public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
- ) throws IOException,
- InterruptedException;
-}
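
Client code does not normally speak this protocol over RPC directly; it goes
through org.apache.hadoop.mapreduce.Cluster, which wraps a ClientProtocol
proxy. A minimal sketch, assuming a cluster reachable through the default
configuration (the demo class name is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.ClusterMetrics;

public class ClusterInfoDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Cluster cluster = new Cluster(conf);  // builds the ClientProtocol proxy
    try {
      // Backed by getClusterMetrics() on the protocol.
      ClusterMetrics metrics = cluster.getClusterStatus();
      System.out.println("trackers=" + metrics.getTaskTrackerCount()
          + " map slots=" + metrics.getMapSlotCapacity()
          + " reduce slots=" + metrics.getReduceSlotCapacity());
      // Backed by getFilesystemName(): where the client stages job files.
      System.out.println("fs=" + cluster.getFileSystemName());
    } finally {
      cluster.close();
    }
  }
}
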
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java
deleted file mode 100644
index 6fd05fe..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.security;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobTracker;
-import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-
-
-/**
- * This class provides user facing APIs for transferring secrets from
- * the job client to the tasks.
- * The secrets can be stored just before submission of jobs and read during
- * the task execution.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class TokenCache {
-
- private static final Log LOG = LogFactory.getLog(TokenCache.class);
-
-
- /**
- * Auxiliary method to get the user's secret keys.
- * @param alias
- * @return secret key from the storage
- */
- public static byte[] getSecretKey(Credentials credentials, Text alias) {
- if(credentials == null)
- return null;
- return credentials.getSecretKey(alias);
- }
-
- /**
- * Convenience method to obtain delegation tokens from namenodes
- * corresponding to the paths passed.
- * @param credentials
- * @param ps array of paths
- * @param conf configuration
- * @throws IOException
- */
- public static void obtainTokensForNamenodes(Credentials credentials,
- Path[] ps, Configuration conf) throws IOException {
- if (!UserGroupInformation.isSecurityEnabled()) {
- return;
- }
- obtainTokensForNamenodesInternal(credentials, ps, conf);
- }
-
- static void obtainTokensForNamenodesInternal(Credentials credentials,
- Path[] ps, Configuration conf) throws IOException {
- for(Path p: ps) {
- FileSystem fs = FileSystem.get(p.toUri(), conf);
- obtainTokensForNamenodesInternal(fs, credentials, conf);
- }
- }
-
- static String getJTPrincipal(Configuration conf) throws IOException {
- String jtHostname = JobTracker.getAddress(conf).getHostName();
- // get jobtracker principal for use as delegation token renewer
- return SecurityUtil.getServerPrincipal(conf.get(JTConfig.JT_USER_NAME),
- jtHostname);
- }
-
- /**
- * get delegation token for a specific FS
- * @param fs
- * @param credentials
- * @param p
- * @param conf
- * @throws IOException
- */
- static void obtainTokensForNamenodesInternal(FileSystem fs,
- Credentials credentials, Configuration conf) throws IOException {
- String delegTokenRenewer = getJTPrincipal(conf);
- if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
- throw new IOException(
- "Can't get JobTracker Kerberos principal for use as renewer");
- }
- boolean readFile = true;
-
- String fsName = fs.getCanonicalServiceName();
- if (TokenCache.getDelegationToken(credentials, fsName) == null) {
- //TODO: Need to come up with a better place to put
- //this block of code to do with reading the file
- if (readFile) {
- readFile = false;
- String binaryTokenFilename =
- conf.get("mapreduce.job.credentials.binary");
- if (binaryTokenFilename != null) {
- Credentials binary;
- try {
- binary = Credentials.readTokenStorageFile(
- new Path("file:///" + binaryTokenFilename), conf);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- credentials.addAll(binary);
- }
- if (TokenCache.getDelegationToken(credentials, fsName) != null) {
- LOG.debug("DT for " + fsName + " is already present");
- return;
- }
- }
- List<Token<?>> tokens = fs.getDelegationTokens(delegTokenRenewer);
- if (tokens != null) {
- for (Token<?> token : tokens) {
- credentials.addToken(token.getService(), token);
- LOG.info("Got dt for " + fs.getUri() + ";uri="+ fsName +
- ";t.service="+token.getService());
- }
- }
- //Call getDelegationToken as well for now - for FS implementations
- // which may not have implemented getDelegationTokens (hftp)
- Token<?> token = fs.getDelegationToken(delegTokenRenewer);
- if (token != null) {
- Text fsNameText = new Text(fsName);
- token.setService(fsNameText);
- credentials.addToken(fsNameText, token);
- LOG.info("Got dt for " + fs.getUri() + ";uri="+ fsName +
- ";t.service="+token.getService());
- }
- }
- }
-
- /**
- * file name used on HDFS for generated job token
- */
- @InterfaceAudience.Private
- public static final String JOB_TOKEN_HDFS_FILE = "jobToken";
-
- /**
- * conf setting for job tokens cache file name
- */
- @InterfaceAudience.Private
- public static final String JOB_TOKENS_FILENAME = "mapreduce.job.jobTokenFile";
- private static final Text JOB_TOKEN = new Text("ShuffleAndJobToken");
-
- /**
- * Look up the delegation token cached for the given namenode.
- * @param namenode
- * @return delegation token
- */
- @SuppressWarnings("unchecked")
- @InterfaceAudience.Private
- public static Token<DelegationTokenIdentifier> getDelegationToken(
- Credentials credentials, String namenode) {
- return (Token<DelegationTokenIdentifier>) credentials.getToken(new Text(
- namenode));
- }
-
- /**
- * load job token from a file
- * @param conf
- * @throws IOException
- */
- @InterfaceAudience.Private
- public static Credentials loadTokens(String jobTokenFile, JobConf conf)
- throws IOException {
- Path localJobTokenFile = new Path ("file:///" + jobTokenFile);
-
- Credentials ts = Credentials.readTokenStorageFile(localJobTokenFile, conf);
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("Task: Loaded jobTokenFile from: "+
- localJobTokenFile.toUri().getPath()
- +"; num of sec keys = " + ts.numberOfSecretKeys() +
- " Number of tokens " + ts.numberOfTokens());
- }
- return ts;
- }
- /**
- * store job token
- * @param t
- */
- @InterfaceAudience.Private
- public static void setJobToken(Token<? extends TokenIdentifier> t,
- Credentials credentials) {
- credentials.addToken(JOB_TOKEN, t);
- }
- /**
- * Get the job token from the given credentials.
- * @return job token
- */
- @SuppressWarnings("unchecked")
- @InterfaceAudience.Private
- public static Token<JobTokenIdentifier> getJobToken(Credentials credentials) {
- return (Token<JobTokenIdentifier>) credentials.getToken(JOB_TOKEN);
- }
-}
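
The usual caller of the class above is job submission, which collects
delegation tokens for every input and output filesystem before the job
reaches the cluster. A minimal sketch, assuming security is enabled and
using hypothetical namenode paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.Credentials;

public class TokenCacheDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Credentials creds = new Credentials();
    // One token is fetched per distinct namenode; duplicates are skipped.
    Path[] inputs = { new Path("hdfs://nn1:8020/data"),
                      new Path("hdfs://nn2:8020/more-data") };
    // No-op when security is off; otherwise fills creds with HDFS
    // delegation tokens renewable by the jobtracker principal.
    TokenCache.obtainTokensForNamenodes(creds, inputs, conf);
    System.out.println("tokens cached: " + creds.numberOfTokens());
  }
}
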
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java
deleted file mode 100644
index 0492bdb..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapreduce.task.reduce;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate;
-import org.apache.hadoop.mapred.TaskCompletionEvent;
-import org.apache.hadoop.mapred.TaskTracker;
-import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-
-class EventFetcher<K,V> extends Thread {
- private static final long SLEEP_TIME = 1000;
- private static final int MAX_EVENTS_TO_FETCH = 10000;
- private static final int MAX_RETRIES = 10;
- private static final int RETRY_PERIOD = 5000;
- private static final Log LOG = LogFactory.getLog(EventFetcher.class);
-
- private final TaskAttemptID reduce;
- private final TaskUmbilicalProtocol umbilical;
- private final ShuffleScheduler<K,V> scheduler;
- private int fromEventId = 0;
- private ExceptionReporter exceptionReporter = null;
-
- private int maxMapRuntime = 0;
-
- public EventFetcher(TaskAttemptID reduce,
- TaskUmbilicalProtocol umbilical,
- ShuffleScheduler<K,V> scheduler,
- ExceptionReporter reporter) {
- setName("EventFetcher for fetching Map Completion Events");
- setDaemon(true);
- this.reduce = reduce;
- this.umbilical = umbilical;
- this.scheduler = scheduler;
- exceptionReporter = reporter;
- }
-
- @Override
- public void run() {
- int failures = 0;
- LOG.info(reduce + " Thread started: " + getName());
-
- try {
- while (true) {
- try {
- int numNewMaps = getMapCompletionEvents();
- failures = 0;
- if (numNewMaps > 0) {
- LOG.info(reduce + ": " + "Got " + numNewMaps + " new map-outputs");
- }
- LOG.debug("GetMapEventsThread about to sleep for " + SLEEP_TIME);
- Thread.sleep(SLEEP_TIME);
- } catch (IOException ie) {
- LOG.info("Exception in getting events", ie);
- // check to see whether to abort
- if (++failures >= MAX_RETRIES) {
- throw new IOException("too many failures downloading events", ie);
- }
- // sleep for a bit
- Thread.sleep(RETRY_PERIOD);
- }
- }
- } catch (InterruptedException e) {
- return;
- } catch (Throwable t) {
- exceptionReporter.reportException(t);
- return;
- }
- }
-
- /**
- * Queries the {@link TaskTracker} for a set of map-completion events
- * from a given event ID.
- * @throws IOException
- */
- private int getMapCompletionEvents() throws IOException {
-
- int numNewMaps = 0;
-
- MapTaskCompletionEventsUpdate update =
- umbilical.getMapCompletionEvents((org.apache.hadoop.mapred.JobID)
- reduce.getJobID(),
- fromEventId,
- MAX_EVENTS_TO_FETCH,
- (org.apache.hadoop.mapred.TaskAttemptID)
- reduce);
- TaskCompletionEvent events[] = update.getMapTaskCompletionEvents();
- LOG.debug("Got " + events.length + " map completion events from " +
- fromEventId);
-
- // Check if the reset is required.
- // Since there is no ordering of the task completion events at the
- // reducer, the only option to sync with the new jobtracker is to reset
- // the events index
- if (update.shouldReset()) {
- fromEventId = 0;
- scheduler.resetKnownMaps();
- }
-
- // Update the last seen event ID
- fromEventId += events.length;
-
- // Process the TaskCompletionEvents:
- // 1. Save the SUCCEEDED maps in knownOutputs to fetch the outputs.
- // 2. Save the OBSOLETE/FAILED/KILLED maps in obsoleteOutputs to stop
- // fetching from those maps.
- // 3. Remove TIPFAILED maps from neededOutputs since we don't need their
- // outputs at all.
- for (TaskCompletionEvent event : events) {
- switch (event.getTaskStatus()) {
- case SUCCEEDED:
- URI u = getBaseURI(event.getTaskTrackerHttp());
- scheduler.addKnownMapOutput(u.getHost() + ":" + u.getPort(),
- u.toString(),
- event.getTaskAttemptId());
- numNewMaps ++;
- int duration = event.getTaskRunTime();
- if (duration > maxMapRuntime) {
- maxMapRuntime = duration;
- scheduler.informMaxMapRunTime(maxMapRuntime);
- }
- break;
- case FAILED:
- case KILLED:
- case OBSOLETE:
- scheduler.obsoleteMapOutput(event.getTaskAttemptId());
- LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
- " map-task: '" + event.getTaskAttemptId() + "'");
- break;
- case TIPFAILED:
- scheduler.tipFailed(event.getTaskAttemptId().getTaskID());
- LOG.info("Ignoring output of failed map TIP: '" +
- event.getTaskAttemptId() + "'");
- break;
- }
- }
- return numNewMaps;
- }
-
- private URI getBaseURI(String url) {
- StringBuffer baseUrl = new StringBuffer(url);
- if (!url.endsWith("/")) {
- baseUrl.append("/");
- }
- baseUrl.append("mapOutput?job=");
- baseUrl.append(reduce.getJobID());
- baseUrl.append("&reduce=");
- baseUrl.append(reduce.getTaskID().getId());
- baseUrl.append("&map=");
- URI u = URI.create(baseUrl.toString());
- return u;
- }
-}
\ No newline at end of file
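
The fetch URL assembled by getBaseURI above has a fixed shape that the
fetcher later extends with map attempt ids. A self-contained sketch of the
same string construction, with hypothetical tracker host and ids:

public class MapOutputUrlDemo {
  // Mirrors EventFetcher.getBaseURI: the tracker's HTTP address plus the
  // job id, the reduce partition, and a trailing &map= to which attempt
  // ids are appended when outputs are actually fetched.
  static String baseUrl(String trackerHttp, String jobId, int reduce) {
    StringBuilder url = new StringBuilder(trackerHttp);
    if (!trackerHttp.endsWith("/")) {
      url.append("/");
    }
    url.append("mapOutput?job=").append(jobId)
       .append("&reduce=").append(reduce)
       .append("&map=");
    return url.toString();
  }

  public static void main(String[] args) {
    System.out.println(
        baseUrl("http://tt1:50060", "job_201108150000_0001", 3));
    // -> http://tt1:50060/mapOutput?job=job_201108150000_0001&reduce=3&map=
  }
}
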
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java
deleted file mode 100644
index 45bab8f..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapreduce.task.reduce;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Comparator;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalDirAllocator;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BoundedByteArrayOutputStream;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.TaskTracker;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-
-class MapOutput<K,V> {
- private static final Log LOG = LogFactory.getLog(MapOutput.class);
- private static AtomicInteger ID = new AtomicInteger(0);
-
- public static enum Type {
- WAIT,
- MEMORY,
- DISK
- }
-
- private final int id;
-
- private final MergeManager<K,V> merger;
- private final TaskAttemptID mapId;
-
- private final long size;
-
- private final byte[] memory;
- private BoundedByteArrayOutputStream byteStream;
-
- private final FileSystem localFS;
- private final Path tmpOutputPath;
- private final Path outputPath;
- private final OutputStream disk;
-
- private final Type type;
-
- private final boolean primaryMapOutput;
-
- MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, long size,
- JobConf conf, LocalDirAllocator localDirAllocator,
- int fetcher, boolean primaryMapOutput) throws IOException {
- this.id = ID.incrementAndGet();
- this.mapId = mapId;
- this.merger = merger;
-
- type = Type.DISK;
-
- memory = null;
- byteStream = null;
-
- this.size = size;
-
- this.localFS = FileSystem.getLocal(conf);
- String filename = "map_" + mapId.getTaskID().getId() + ".out";
- String tmpOutput = Path.SEPARATOR +
- TaskTracker.getJobCacheSubdir(conf.getUser()) +
- Path.SEPARATOR + mapId.getJobID() +
- Path.SEPARATOR + merger.getReduceId() +
- Path.SEPARATOR + "output" +
- Path.SEPARATOR + filename +
- "." + fetcher;
-
- tmpOutputPath =
- localDirAllocator.getLocalPathForWrite(tmpOutput, size, conf);
- outputPath = new Path(tmpOutputPath.getParent(), filename);
- disk = localFS.create(tmpOutputPath);
-
- this.primaryMapOutput = primaryMapOutput;
- }
-
- MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, int size,
- boolean primaryMapOutput) {
- this.id = ID.incrementAndGet();
- this.mapId = mapId;
- this.merger = merger;
-
- type = Type.MEMORY;
- byteStream = new BoundedByteArrayOutputStream(size);
- memory = byteStream.getBuffer();
-
- this.size = size;
-
- localFS = null;
- disk = null;
- outputPath = null;
- tmpOutputPath = null;
-
- this.primaryMapOutput = primaryMapOutput;
- }
-
- public MapOutput(TaskAttemptID mapId) {
- this.id = ID.incrementAndGet();
- this.mapId = mapId;
-
- type = Type.WAIT;
- merger = null;
- memory = null;
- byteStream = null;
-
- size = -1;
-
- localFS = null;
- disk = null;
- outputPath = null;
- tmpOutputPath = null;
-
- this.primaryMapOutput = false;
- }
-
- public boolean isPrimaryMapOutput() {
- return primaryMapOutput;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof MapOutput) {
- return id == ((MapOutput)obj).id;
- }
- return false;
- }
-
- @Override
- public int hashCode() {
- return id;
- }
-
- public Path getOutputPath() {
- return outputPath;
- }
-
- public byte[] getMemory() {
- return memory;
- }
-
- public BoundedByteArrayOutputStream getArrayStream() {
- return byteStream;
- }
-
- public OutputStream getDisk() {
- return disk;
- }
-
- public TaskAttemptID getMapId() {
- return mapId;
- }
-
- public Type getType() {
- return type;
- }
-
- public long getSize() {
- return size;
- }
-
- public void commit() throws IOException {
- if (type == Type.MEMORY) {
- merger.closeInMemoryFile(this);
- } else if (type == Type.DISK) {
- localFS.rename(tmpOutputPath, outputPath);
- merger.closeOnDiskFile(outputPath);
- } else {
- throw new IOException("Cannot commit MapOutput of type WAIT!");
- }
- }
-
- public void abort() {
- if (type == Type.MEMORY) {
- merger.unreserve(memory.length);
- } else if (type == Type.DISK) {
- try {
- localFS.delete(tmpOutputPath, false);
- } catch (IOException ie) {
- LOG.info("failure to clean up " + tmpOutputPath, ie);
- }
- } else {
- throw new IllegalArgumentException
- ("Cannot commit MapOutput with of type WAIT!");
- }
- }
-
- public String toString() {
- return "MapOutput(" + mapId + ", " + type + ")";
- }
-
- public static class MapOutputComparator<K, V>
- implements Comparator<MapOutput<K, V>> {
- public int compare(MapOutput<K, V> o1, MapOutput<K, V> o2) {
- if (o1.id == o2.id) {
- return 0;
- }
-
- if (o1.size < o2.size) {
- return -1;
- } else if (o1.size > o2.size) {
- return 1;
- }
-
- if (o1.id < o2.id) {
- return -1;
- } else {
- return 1;
-
- }
- }
- }
-
-}
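
MapOutputComparator above orders outputs by size and breaks ties by the
monotonically increasing creation id, so merges see the smallest segments
first in deterministic order. A self-contained sketch of the same ordering
rule (the Output stand-in class is hypothetical):

import java.util.Comparator;
import java.util.TreeSet;

public class MapOutputOrderDemo {
  // Minimal stand-in for MapOutput: only the fields the comparator reads.
  static class Output {
    final int id; final long size;
    Output(int id, long size) { this.id = id; this.size = size; }
    public String toString() { return "Output(id=" + id + ", size=" + size + ")"; }
  }

  public static void main(String[] args) {
    // Same rule as MapOutput.MapOutputComparator: smaller size first,
    // ties broken by the earlier id; equal ids compare as equal.
    Comparator<Output> cmp = new Comparator<Output>() {
      public int compare(Output o1, Output o2) {
        if (o1.id == o2.id) return 0;
        if (o1.size != o2.size) return o1.size < o2.size ? -1 : 1;
        return o1.id < o2.id ? -1 : 1;
      }
    };
    TreeSet<Output> outputs = new TreeSet<Output>(cmp);
    outputs.add(new Output(1, 4096));
    outputs.add(new Output(2, 1024));
    outputs.add(new Output(3, 4096));
    System.out.println(outputs.first()); // Output(id=2, size=1024)
  }
}
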
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
deleted file mode 100644
index 83b4d65..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
+++ /dev/null
@@ -1,768 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapreduce.task.reduce;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.ChecksumFileSystem;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalDirAllocator;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.RawComparator;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapred.IFile;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MapOutputFile;
-import org.apache.hadoop.mapred.Merger;
-import org.apache.hadoop.mapred.RawKeyValueIterator;
-import org.apache.hadoop.mapred.Reducer;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.Task;
-import org.apache.hadoop.mapred.Counters.Counter;
-import org.apache.hadoop.mapred.IFile.Reader;
-import org.apache.hadoop.mapred.IFile.Writer;
-import org.apache.hadoop.mapred.Merger.Segment;
-import org.apache.hadoop.mapred.Task.CombineOutputCollector;
-import org.apache.hadoop.mapred.Task.CombineValuesIterator;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.mapreduce.TaskID;
-import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator;
-import org.apache.hadoop.util.Progress;
-import org.apache.hadoop.util.ReflectionUtils;
-
-@SuppressWarnings(value={"unchecked", "deprecation"})
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class MergeManager<K, V> {
-
- private static final Log LOG = LogFactory.getLog(MergeManager.class);
-
- /* Maximum percentage of the in-memory limit that a single shuffle can
- * consume*/
- private static final float MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION = 0.25f;
-
- private final TaskAttemptID reduceId;
-
- private final JobConf jobConf;
- private final FileSystem localFS;
- private final FileSystem rfs;
- private final LocalDirAllocator localDirAllocator;
-
- protected MapOutputFile mapOutputFile;
-
- Set<MapOutput<K, V>> inMemoryMergedMapOutputs =
- new TreeSet<MapOutput<K,V>>(new MapOutputComparator<K, V>());
- private final IntermediateMemoryToMemoryMerger memToMemMerger;
-
- Set<MapOutput<K, V>> inMemoryMapOutputs =
- new TreeSet<MapOutput<K,V>>(new MapOutputComparator<K, V>());
- private final InMemoryMerger inMemoryMerger;
-
- Set<Path> onDiskMapOutputs = new TreeSet<Path>();
- private final OnDiskMerger onDiskMerger;
-
- private final long memoryLimit;
- private long usedMemory;
- private final long maxSingleShuffleLimit;
-
- private final int memToMemMergeOutputsThreshold;
- private final long mergeThreshold;
-
- private final int ioSortFactor;
-
- private final Reporter reporter;
- private final ExceptionReporter exceptionReporter;
-
- /**
- * Combiner class to run during in-memory merge, if defined.
- */
- private final Class<? extends Reducer> combinerClass;
-
- /**
- * Resettable collector used for combine.
- */
- private final CombineOutputCollector<K,V> combineCollector;
-
- private final Counters.Counter spilledRecordsCounter;
-
- private final Counters.Counter reduceCombineInputCounter;
-
- private final Counters.Counter mergedMapOutputsCounter;
-
- private final CompressionCodec codec;
-
- private final Progress mergePhase;
-
- public MergeManager(TaskAttemptID reduceId, JobConf jobConf,
- FileSystem localFS,
- LocalDirAllocator localDirAllocator,
- Reporter reporter,
- CompressionCodec codec,
- Class<? extends Reducer> combinerClass,
- CombineOutputCollector<K,V> combineCollector,
- Counters.Counter spilledRecordsCounter,
- Counters.Counter reduceCombineInputCounter,
- Counters.Counter mergedMapOutputsCounter,
- ExceptionReporter exceptionReporter,
- Progress mergePhase, MapOutputFile mapOutputFile) {
- this.reduceId = reduceId;
- this.jobConf = jobConf;
- this.localDirAllocator = localDirAllocator;
- this.exceptionReporter = exceptionReporter;
-
- this.reporter = reporter;
- this.codec = codec;
- this.combinerClass = combinerClass;
- this.combineCollector = combineCollector;
- this.reduceCombineInputCounter = reduceCombineInputCounter;
- this.spilledRecordsCounter = spilledRecordsCounter;
- this.mergedMapOutputsCounter = mergedMapOutputsCounter;
- this.mapOutputFile = mapOutputFile;
- this.mapOutputFile.setConf(jobConf);
-
- this.localFS = localFS;
- this.rfs = ((LocalFileSystem)localFS).getRaw();
-
- final float maxInMemCopyUse =
- jobConf.getFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 0.90f);
- if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) {
- throw new IllegalArgumentException("Invalid value for " +
- MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT + ": " +
- maxInMemCopyUse);
- }
-
- // Allow unit tests to fix Runtime memory
- this.memoryLimit =
- (long)(jobConf.getLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,
- Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE))
- * maxInMemCopyUse);
-
- this.ioSortFactor = jobConf.getInt(MRJobConfig.IO_SORT_FACTOR, 100);
-
- this.maxSingleShuffleLimit =
- (long)(memoryLimit * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION);
- this.memToMemMergeOutputsThreshold =
- jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor);
- this.mergeThreshold = (long)(this.memoryLimit *
- jobConf.getFloat(MRJobConfig.SHUFFLE_MERGE_EPRCENT,
- 0.90f));
- LOG.info("MergerManager: memoryLimit=" + memoryLimit + ", " +
- "maxSingleShuffleLimit=" + maxSingleShuffleLimit + ", " +
- "mergeThreshold=" + mergeThreshold + ", " +
- "ioSortFactor=" + ioSortFactor + ", " +
- "memToMemMergeOutputsThreshold=" + memToMemMergeOutputsThreshold);
-
- boolean allowMemToMemMerge =
- jobConf.getBoolean(MRJobConfig.REDUCE_MEMTOMEM_ENABLED, false);
- if (allowMemToMemMerge) {
- this.memToMemMerger =
- new IntermediateMemoryToMemoryMerger(this,
- memToMemMergeOutputsThreshold);
- this.memToMemMerger.start();
- } else {
- this.memToMemMerger = null;
- }
-
- this.inMemoryMerger = new InMemoryMerger(this);
- this.inMemoryMerger.start();
-
- this.onDiskMerger = new OnDiskMerger(this);
- this.onDiskMerger.start();
-
- this.mergePhase = mergePhase;
- }
-
-
- TaskAttemptID getReduceId() {
- return reduceId;
- }
-
- public void waitForInMemoryMerge() throws InterruptedException {
- inMemoryMerger.waitForMerge();
- }
-
- private boolean canShuffleToMemory(long requestedSize) {
- return (requestedSize < maxSingleShuffleLimit);
- }
-
- final private MapOutput<K,V> stallShuffle = new MapOutput<K,V>(null);
-
- public synchronized MapOutput<K,V> reserve(TaskAttemptID mapId,
- long requestedSize,
- int fetcher
- ) throws IOException {
- if (!canShuffleToMemory(requestedSize)) {
- LOG.info(mapId + ": Shuffling to disk since " + requestedSize +
- " is greater than maxSingleShuffleLimit (" +
- maxSingleShuffleLimit + ")");
- return new MapOutput<K,V>(mapId, this, requestedSize, jobConf,
- localDirAllocator, fetcher, true);
- }
-
- // Stall shuffle if we are above the memory limit
-
- // It is possible that all threads could just be stalling and not make
- // progress at all. This could happen when:
- //
- // requested size is causing the used memory to go above limit &&
- // requested size < singleShuffleLimit &&
- // current used size < mergeThreshold (merge will not get triggered)
- //
- // To avoid this from happening, we allow exactly one thread to go past
- // the memory limit. We check (usedMemory > memoryLimit) and not
- // (usedMemory + requestedSize > memoryLimit). When this thread is done
- // fetching, this will automatically trigger a merge thereby unlocking
- // all the stalled threads
-
- if (usedMemory > memoryLimit) {
- LOG.debug(mapId + ": Stalling shuffle since usedMemory (" + usedMemory +
- ") is greater than memoryLimit (" + memoryLimit + ")");
-
- return stallShuffle;
- }
-
- // Allow the in-memory shuffle to progress
- LOG.debug(mapId + ": Proceeding with shuffle since usedMemory (" +
- usedMemory +
- ") is lesser than memoryLimit (" + memoryLimit + ")");
- return unconditionalReserve(mapId, requestedSize, true);
- }
-
- /**
- * Unconditional reserve, used by the memory-to-memory merge thread.
- * @return an in-memory MapOutput backed by the newly reserved memory
- */
- private synchronized MapOutput<K, V> unconditionalReserve(
- TaskAttemptID mapId, long requestedSize, boolean primaryMapOutput) {
- usedMemory += requestedSize;
- return new MapOutput<K,V>(mapId, this, (int)requestedSize,
- primaryMapOutput);
- }
-
- synchronized void unreserve(long size) {
- usedMemory -= size;
- }
-
- public synchronized void closeInMemoryFile(MapOutput<K,V> mapOutput) {
- inMemoryMapOutputs.add(mapOutput);
- LOG.info("closeInMemoryFile -> map-output of size: " + mapOutput.getSize()
- + ", inMemoryMapOutputs.size() -> " + inMemoryMapOutputs.size());
-
- synchronized (inMemoryMerger) {
- if (!inMemoryMerger.isInProgress() && usedMemory >= mergeThreshold) {
- LOG.info("Starting inMemoryMerger's merge since usedMemory=" +
- usedMemory + " > mergeThreshold=" + mergeThreshold);
- inMemoryMapOutputs.addAll(inMemoryMergedMapOutputs);
- inMemoryMergedMapOutputs.clear();
- inMemoryMerger.startMerge(inMemoryMapOutputs);
- }
- }
-
- if (memToMemMerger != null) {
- synchronized (memToMemMerger) {
- if (!memToMemMerger.isInProgress() &&
- inMemoryMapOutputs.size() >= memToMemMergeOutputsThreshold) {
- memToMemMerger.startMerge(inMemoryMapOutputs);
- }
- }
- }
- }
-
-
- public synchronized void closeInMemoryMergedFile(MapOutput<K,V> mapOutput) {
- inMemoryMergedMapOutputs.add(mapOutput);
- LOG.info("closeInMemoryMergedFile -> size: " + mapOutput.getSize() +
- ", inMemoryMergedMapOutputs.size() -> " +
- inMemoryMergedMapOutputs.size());
- }
-
- public synchronized void closeOnDiskFile(Path file) {
- onDiskMapOutputs.add(file);
-
- synchronized (onDiskMerger) {
- if (!onDiskMerger.isInProgress() &&
- onDiskMapOutputs.size() >= (2 * ioSortFactor - 1)) {
- onDiskMerger.startMerge(onDiskMapOutputs);
- }
- }
- }
-
- public RawKeyValueIterator close() throws Throwable {
- // Wait for on-going merges to complete
- if (memToMemMerger != null) {
- memToMemMerger.close();
- }
- inMemoryMerger.close();
- onDiskMerger.close();
-
- List<MapOutput<K, V>> memory =
- new ArrayList<MapOutput<K, V>>(inMemoryMergedMapOutputs);
- memory.addAll(inMemoryMapOutputs);
- List<Path> disk = new ArrayList<Path>(onDiskMapOutputs);
- return finalMerge(jobConf, rfs, memory, disk);
- }
-
- private class IntermediateMemoryToMemoryMerger
- extends MergeThread<MapOutput<K, V>, K, V> {
-
- public IntermediateMemoryToMemoryMerger(MergeManager<K, V> manager,
- int mergeFactor) {
- super(manager, mergeFactor, exceptionReporter);
- setName("InMemoryMerger - Thread to do in-memory merge of in-memory " +
- "shuffled map-outputs");
- setDaemon(true);
- }
-
- @Override
- public void merge(List<MapOutput<K, V>> inputs) throws IOException {
- if (inputs == null || inputs.size() == 0) {
- return;
- }
-
- TaskAttemptID dummyMapId = inputs.get(0).getMapId();
- List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K, V>>();
- long mergeOutputSize =
- createInMemorySegments(inputs, inMemorySegments, 0);
- int noInMemorySegments = inMemorySegments.size();
-
- MapOutput<K, V> mergedMapOutputs =
- unconditionalReserve(dummyMapId, mergeOutputSize, false);
-
- Writer<K, V> writer =
- new InMemoryWriter<K, V>(mergedMapOutputs.getArrayStream());
-
- LOG.info("Initiating Memory-to-Memory merge with " + noInMemorySegments +
- " segments of total-size: " + mergeOutputSize);
-
- RawKeyValueIterator rIter =
- Merger.merge(jobConf, rfs,
- (Class<K>)jobConf.getMapOutputKeyClass(),
- (Class<V>)jobConf.getMapOutputValueClass(),
- inMemorySegments, inMemorySegments.size(),
- new Path(reduceId.toString()),
- (RawComparator<K>)jobConf.getOutputKeyComparator(),
- reporter, null, null, null);
- Merger.writeFile(rIter, writer, reporter, jobConf);
- writer.close();
-
- LOG.info(reduceId +
- " Memory-to-Memory merge of the " + noInMemorySegments +
- " files in-memory complete.");
-
- // Note the output of the merge
- closeInMemoryMergedFile(mergedMapOutputs);
- }
- }
-
- private class InMemoryMerger extends MergeThread<MapOutput<K,V>, K,V> {
-
- public InMemoryMerger(MergeManager<K, V> manager) {
- super(manager, Integer.MAX_VALUE, exceptionReporter);
- setName
- ("InMemoryMerger - Thread to merge in-memory shuffled map-outputs");
- setDaemon(true);
- }
-
- @Override
- public void merge(List<MapOutput<K,V>> inputs) throws IOException {
- if (inputs == null || inputs.size() == 0) {
- return;
- }
-
- //name this output file same as the name of the first file that is
- //there in the current list of inmem files (this is guaranteed to
- //be absent on the disk currently. So we don't overwrite a prev.
- //created spill). Also we need to create the output file now since
- //it is not guaranteed that this file will be present after merge
- //is called (we delete empty files as soon as we see them
- //in the merge method)
-
- //figure out the mapId
- TaskAttemptID mapId = inputs.get(0).getMapId();
- TaskID mapTaskId = mapId.getTaskID();
-
- List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K, V>>();
- long mergeOutputSize =
- createInMemorySegments(inputs, inMemorySegments,0);
- int noInMemorySegments = inMemorySegments.size();
-
- Path outputPath =
- mapOutputFile.getInputFileForWrite(mapTaskId,
- mergeOutputSize).suffix(
- Task.MERGED_OUTPUT_PREFIX);
-
- Writer<K,V> writer =
- new Writer<K,V>(jobConf, rfs, outputPath,
- (Class<K>) jobConf.getMapOutputKeyClass(),
- (Class<V>) jobConf.getMapOutputValueClass(),
- codec, null);
-
- RawKeyValueIterator rIter = null;
- try {
- LOG.info("Initiating in-memory merge with " + noInMemorySegments +
- " segments...");
-
- rIter = Merger.merge(jobConf, rfs,
- (Class<K>)jobConf.getMapOutputKeyClass(),
- (Class<V>)jobConf.getMapOutputValueClass(),
- inMemorySegments, inMemorySegments.size(),
- new Path(reduceId.toString()),
- (RawComparator<K>)jobConf.getOutputKeyComparator(),
- reporter, spilledRecordsCounter, null, null);
-
- if (null == combinerClass) {
- Merger.writeFile(rIter, writer, reporter, jobConf);
- } else {
- combineCollector.setWriter(writer);
- combineAndSpill(rIter, reduceCombineInputCounter);
- }
- writer.close();
-
- LOG.info(reduceId +
- " Merge of the " + noInMemorySegments +
- " files in-memory complete." +
- " Local file is " + outputPath + " of size " +
- localFS.getFileStatus(outputPath).getLen());
- } catch (IOException e) {
-        // Make sure that we delete the on-disk file that we created
-        // earlier when we set up the Writer above.
- localFS.delete(outputPath, true);
- throw e;
- }
-
- // Note the output of the merge
- closeOnDiskFile(outputPath);
- }
-
- }
-
- private class OnDiskMerger extends MergeThread<Path,K,V> {
-
- public OnDiskMerger(MergeManager<K, V> manager) {
- super(manager, Integer.MAX_VALUE, exceptionReporter);
- setName("OnDiskMerger - Thread to merge on-disk map-outputs");
- setDaemon(true);
- }
-
- @Override
- public void merge(List<Path> inputs) throws IOException {
- // sanity check
- if (inputs == null || inputs.isEmpty()) {
-        LOG.info("No on-disk files to merge...");
- return;
- }
-
- long approxOutputSize = 0;
- int bytesPerSum =
- jobConf.getInt("io.bytes.per.checksum", 512);
-
- LOG.info("OnDiskMerger: We have " + inputs.size() +
- " map outputs on disk. Triggering merge...");
-
- // 1. Prepare the list of files to be merged.
- for (Path file : inputs) {
- approxOutputSize += localFS.getFileStatus(file).getLen();
- }
-
- // add the checksum length
- approxOutputSize +=
- ChecksumFileSystem.getChecksumLength(approxOutputSize, bytesPerSum);
-
- // 2. Start the on-disk merge process
- Path outputPath =
- localDirAllocator.getLocalPathForWrite(inputs.get(0).toString(),
- approxOutputSize, jobConf).suffix(Task.MERGED_OUTPUT_PREFIX);
- Writer<K,V> writer =
- new Writer<K,V>(jobConf, rfs, outputPath,
- (Class<K>) jobConf.getMapOutputKeyClass(),
- (Class<V>) jobConf.getMapOutputValueClass(),
- codec, null);
- RawKeyValueIterator iter = null;
- Path tmpDir = new Path(reduceId.toString());
- try {
- iter = Merger.merge(jobConf, rfs,
- (Class<K>) jobConf.getMapOutputKeyClass(),
- (Class<V>) jobConf.getMapOutputValueClass(),
- codec, inputs.toArray(new Path[inputs.size()]),
- true, ioSortFactor, tmpDir,
- (RawComparator<K>) jobConf.getOutputKeyComparator(),
- reporter, spilledRecordsCounter, null,
- mergedMapOutputsCounter, null);
-
- Merger.writeFile(iter, writer, reporter, jobConf);
- writer.close();
- } catch (IOException e) {
- localFS.delete(outputPath, true);
- throw e;
- }
-
- closeOnDiskFile(outputPath);
-
- LOG.info(reduceId +
- " Finished merging " + inputs.size() +
- " map output files on disk of total-size " +
- approxOutputSize + "." +
- " Local output file is " + outputPath + " of size " +
- localFS.getFileStatus(outputPath).getLen());
- }
- }
-
- private void combineAndSpill(
- RawKeyValueIterator kvIter,
- Counters.Counter inCounter) throws IOException {
- JobConf job = jobConf;
- Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
- Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
- Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
- RawComparator<K> comparator =
- (RawComparator<K>)job.getOutputKeyComparator();
- try {
- CombineValuesIterator values = new CombineValuesIterator(
- kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
- inCounter);
- while (values.more()) {
- combiner.reduce(values.getKey(), values, combineCollector,
- Reporter.NULL);
- values.nextKey();
- }
- } finally {
- combiner.close();
- }
- }
-
- private long createInMemorySegments(List<MapOutput<K,V>> inMemoryMapOutputs,
- List<Segment<K, V>> inMemorySegments,
- long leaveBytes
- ) throws IOException {
- long totalSize = 0L;
-    // fullSize could come from the RamManager, but map outputs can be
-    // closed and not yet present in inMemoryMapOutputs
- long fullSize = 0L;
- for (MapOutput<K,V> mo : inMemoryMapOutputs) {
- fullSize += mo.getMemory().length;
- }
- while(fullSize > leaveBytes) {
- MapOutput<K,V> mo = inMemoryMapOutputs.remove(0);
- byte[] data = mo.getMemory();
- long size = data.length;
- totalSize += size;
- fullSize -= size;
- Reader<K,V> reader = new InMemoryReader<K,V>(MergeManager.this,
- mo.getMapId(),
- data, 0, (int)size);
- inMemorySegments.add(new Segment<K,V>(reader, true,
- (mo.isPrimaryMapOutput() ?
- mergedMapOutputsCounter : null)));
- }
- return totalSize;
- }
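
To make leaveBytes concrete: the loop drains inMemoryMapOutputs from the front until at most leaveBytes of buffered map output remain. With three 100 MB outputs and leaveBytes = 250 MB (illustrative figures), fullSize starts at 300 MB, one output is converted to a segment (fullSize drops to 200 MB, which is within the limit), and the remaining two stay buffered. The in-memory mergers pass leaveBytes = 0 and therefore drain everything.
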
-
- class RawKVIteratorReader extends IFile.Reader<K,V> {
-
- private final RawKeyValueIterator kvIter;
-
- public RawKVIteratorReader(RawKeyValueIterator kvIter, long size)
- throws IOException {
- super(null, null, size, null, spilledRecordsCounter);
- this.kvIter = kvIter;
- }
- public boolean nextRawKey(DataInputBuffer key) throws IOException {
- if (kvIter.next()) {
- final DataInputBuffer kb = kvIter.getKey();
- final int kp = kb.getPosition();
- final int klen = kb.getLength() - kp;
- key.reset(kb.getData(), kp, klen);
- bytesRead += klen;
- return true;
- }
- return false;
- }
- public void nextRawValue(DataInputBuffer value) throws IOException {
- final DataInputBuffer vb = kvIter.getValue();
- final int vp = vb.getPosition();
- final int vlen = vb.getLength() - vp;
- value.reset(vb.getData(), vp, vlen);
- bytesRead += vlen;
- }
- public long getPosition() throws IOException {
- return bytesRead;
- }
-
- public void close() throws IOException {
- kvIter.close();
- }
- }
-
- private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs,
- List<MapOutput<K,V>> inMemoryMapOutputs,
- List<Path> onDiskMapOutputs
- ) throws IOException {
- LOG.info("finalMerge called with " +
- inMemoryMapOutputs.size() + " in-memory map-outputs and " +
- onDiskMapOutputs.size() + " on-disk map-outputs");
-
- final float maxRedPer =
- job.getFloat(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT, 0f);
- if (maxRedPer > 1.0 || maxRedPer < 0.0) {
-      throw new IOException(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT +
-          " = " + maxRedPer);
- }
- int maxInMemReduce = (int)Math.min(
- Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE);
-
- // merge config params
- Class<K> keyClass = (Class<K>)job.getMapOutputKeyClass();
- Class<V> valueClass = (Class<V>)job.getMapOutputValueClass();
- boolean keepInputs = job.getKeepFailedTaskFiles();
- final Path tmpDir = new Path(reduceId.toString());
- final RawComparator<K> comparator =
- (RawComparator<K>)job.getOutputKeyComparator();
-
- // segments required to vacate memory
- List<Segment<K,V>> memDiskSegments = new ArrayList<Segment<K,V>>();
- long inMemToDiskBytes = 0;
- boolean mergePhaseFinished = false;
- if (inMemoryMapOutputs.size() > 0) {
- TaskID mapId = inMemoryMapOutputs.get(0).getMapId().getTaskID();
- inMemToDiskBytes = createInMemorySegments(inMemoryMapOutputs,
- memDiskSegments,
- maxInMemReduce);
- final int numMemDiskSegments = memDiskSegments.size();
- if (numMemDiskSegments > 0 &&
- ioSortFactor > onDiskMapOutputs.size()) {
-
- // If we reach here, it implies that we have less than io.sort.factor
- // disk segments and this will be incremented by 1 (result of the
- // memory segments merge). Since this total would still be
-        // <= io.sort.factor, we will not do any more intermediate merges;
-        // the merge of all these disk segments can be fed directly to the
-        // reduce method.
-
- mergePhaseFinished = true;
- // must spill to disk, but can't retain in-mem for intermediate merge
- final Path outputPath =
- mapOutputFile.getInputFileForWrite(mapId,
- inMemToDiskBytes).suffix(
- Task.MERGED_OUTPUT_PREFIX);
- final RawKeyValueIterator rIter = Merger.merge(job, fs,
- keyClass, valueClass, memDiskSegments, numMemDiskSegments,
- tmpDir, comparator, reporter, spilledRecordsCounter, null,
- mergePhase);
- final Writer<K,V> writer = new Writer<K,V>(job, fs, outputPath,
- keyClass, valueClass, codec, null);
- try {
- Merger.writeFile(rIter, writer, reporter, job);
- // add to list of final disk outputs.
- onDiskMapOutputs.add(outputPath);
- } catch (IOException e) {
- if (null != outputPath) {
- try {
- fs.delete(outputPath, true);
- } catch (IOException ie) {
- // NOTHING
- }
- }
- throw e;
- } finally {
- if (null != writer) {
- writer.close();
- }
- }
- LOG.info("Merged " + numMemDiskSegments + " segments, " +
- inMemToDiskBytes + " bytes to disk to satisfy " +
- "reduce memory limit");
- inMemToDiskBytes = 0;
- memDiskSegments.clear();
- } else if (inMemToDiskBytes != 0) {
- LOG.info("Keeping " + numMemDiskSegments + " segments, " +
- inMemToDiskBytes + " bytes in memory for " +
- "intermediate, on-disk merge");
- }
- }
-
- // segments on disk
- List<Segment<K,V>> diskSegments = new ArrayList<Segment<K,V>>();
- long onDiskBytes = inMemToDiskBytes;
- Path[] onDisk = onDiskMapOutputs.toArray(new Path[onDiskMapOutputs.size()]);
- for (Path file : onDisk) {
- onDiskBytes += fs.getFileStatus(file).getLen();
- LOG.debug("Disk file: " + file + " Length is " +
- fs.getFileStatus(file).getLen());
- diskSegments.add(new Segment<K, V>(job, fs, file, codec, keepInputs,
- (file.toString().endsWith(
- Task.MERGED_OUTPUT_PREFIX) ?
- null : mergedMapOutputsCounter)
- ));
- }
- LOG.info("Merging " + onDisk.length + " files, " +
- onDiskBytes + " bytes from disk");
- Collections.sort(diskSegments, new Comparator<Segment<K,V>>() {
- public int compare(Segment<K, V> o1, Segment<K, V> o2) {
- if (o1.getLength() == o2.getLength()) {
- return 0;
- }
- return o1.getLength() < o2.getLength() ? -1 : 1;
- }
- });
-
-    // Build the final list of segments from the merge, backed by disk + in-mem
- List<Segment<K,V>> finalSegments = new ArrayList<Segment<K,V>>();
- long inMemBytes = createInMemorySegments(inMemoryMapOutputs,
- finalSegments, 0);
- LOG.info("Merging " + finalSegments.size() + " segments, " +
- inMemBytes + " bytes from memory into reduce");
- if (0 != onDiskBytes) {
- final int numInMemSegments = memDiskSegments.size();
- diskSegments.addAll(0, memDiskSegments);
- memDiskSegments.clear();
-      // Pass mergePhase only if there are going to be intermediate
-      // merges. See the comment where mergePhaseFinished is set.
- Progress thisPhase = (mergePhaseFinished) ? null : mergePhase;
- RawKeyValueIterator diskMerge = Merger.merge(
- job, fs, keyClass, valueClass, diskSegments,
- ioSortFactor, numInMemSegments, tmpDir, comparator,
- reporter, false, spilledRecordsCounter, null, thisPhase);
- diskSegments.clear();
- if (0 == finalSegments.size()) {
- return diskMerge;
- }
- finalSegments.add(new Segment<K,V>(
- new RawKVIteratorReader(diskMerge, onDiskBytes), true));
- }
- return Merger.merge(job, fs, keyClass, valueClass,
- finalSegments, finalSegments.size(), tmpDir,
- comparator, reporter, spilledRecordsCounter, null,
- null);
-
- }
-}
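
A worked example of the memory limit applied in finalMerge: with a 1 GB reducer heap and mapreduce.reduce.input.buffer.percent = 0.5 (illustrative figures), maxInMemReduce is 512 MB, so createInMemorySegments spills just enough in-memory outputs to bring the resident total down to 512 MB; whatever remains in memory skips the disk entirely and is handed to the final merge that feeds the reduce.
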
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/LinuxResourceCalculatorPlugin.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/util/LinuxResourceCalculatorPlugin.java
deleted file mode 100644
index 62afe1d3..0000000
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/util/LinuxResourceCalculatorPlugin.java
+++ /dev/null
@@ -1,411 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.util;
-
-import java.io.BufferedReader;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.mapred.TaskTrackerStatus;
-
-/**
- * Plugin to calculate resource information on Linux systems.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
- private static final Log LOG =
- LogFactory.getLog(LinuxResourceCalculatorPlugin.class);
-
- /**
- * proc's meminfo virtual file has key-value pairs in the format
- * "key:[ \t]*value[ \t]kB".
- */
- private static final String PROCFS_MEMFILE = "/proc/meminfo";
- private static final Pattern PROCFS_MEMFILE_FORMAT =
- Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");
-
- // We need the values for the following keys in meminfo
- private static final String MEMTOTAL_STRING = "MemTotal";
- private static final String SWAPTOTAL_STRING = "SwapTotal";
- private static final String MEMFREE_STRING = "MemFree";
- private static final String SWAPFREE_STRING = "SwapFree";
- private static final String INACTIVE_STRING = "Inactive";
-
- /**
- * Patterns for parsing /proc/cpuinfo
- */
- private static final String PROCFS_CPUINFO = "/proc/cpuinfo";
- private static final Pattern PROCESSOR_FORMAT =
- Pattern.compile("^processor[ \t]:[ \t]*([0-9]*)");
- private static final Pattern FREQUENCY_FORMAT =
- Pattern.compile("^cpu MHz[ \t]*:[ \t]*([0-9.]*)");
-
- /**
- * Pattern for parsing /proc/stat
- */
- private static final String PROCFS_STAT = "/proc/stat";
- private static final Pattern CPU_TIME_FORMAT =
- Pattern.compile("^cpu[ \t]*([0-9]*)" +
- "[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*");
-
- private String procfsMemFile;
- private String procfsCpuFile;
- private String procfsStatFile;
- long jiffyLengthInMillis;
-
- private long ramSize = 0;
- private long swapSize = 0;
- private long ramSizeFree = 0; // free ram space on the machine (kB)
- private long swapSizeFree = 0; // free swap space on the machine (kB)
- private long inactiveSize = 0; // inactive cache memory (kB)
- private int numProcessors = 0; // number of processors on the system
- private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
- private long cumulativeCpuTime = 0L; // CPU used time since system is on (ms)
- private long lastCumulativeCpuTime = 0L; // CPU used time read last time (ms)
- private float cpuUsage = TaskTrackerStatus.UNAVAILABLE;
- // Unix timestamp when the CPU time was last read (ms)
- private long sampleTime = TaskTrackerStatus.UNAVAILABLE;
- private long lastSampleTime = TaskTrackerStatus.UNAVAILABLE;
- private ProcfsBasedProcessTree pTree = null;
-
- boolean readMemInfoFile = false;
- boolean readCpuInfoFile = false;
-
- /**
- * Get the current time.
- * @return Unix timestamp in milliseconds
- */
- long getCurrentTime() {
- return System.currentTimeMillis();
- }
-
- public LinuxResourceCalculatorPlugin() {
- procfsMemFile = PROCFS_MEMFILE;
- procfsCpuFile = PROCFS_CPUINFO;
- procfsStatFile = PROCFS_STAT;
- jiffyLengthInMillis = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS;
- String pid = System.getenv().get("JVM_PID");
- pTree = new ProcfsBasedProcessTree(pid);
- }
-
- /**
- * Constructor which allows assigning the /proc/ file paths. This will be
- * used only in unit tests
- * @param procfsMemFile fake file for /proc/meminfo
- * @param procfsCpuFile fake file for /proc/cpuinfo
- * @param procfsStatFile fake file for /proc/stat
- * @param jiffyLengthInMillis fake jiffy length value
- */
- public LinuxResourceCalculatorPlugin(String procfsMemFile,
- String procfsCpuFile,
- String procfsStatFile,
- long jiffyLengthInMillis) {
- this.procfsMemFile = procfsMemFile;
- this.procfsCpuFile = procfsCpuFile;
- this.procfsStatFile = procfsStatFile;
- this.jiffyLengthInMillis = jiffyLengthInMillis;
- String pid = System.getenv().get("JVM_PID");
- pTree = new ProcfsBasedProcessTree(pid);
- }
-
- /**
- * Read /proc/meminfo, parse and compute memory information only once
- */
- private void readProcMemInfoFile() {
- readProcMemInfoFile(false);
- }
-
- /**
- * Read /proc/meminfo, parse and compute memory information
- * @param readAgain if false, read only on the first time
- */
- private void readProcMemInfoFile(boolean readAgain) {
-
- if (readMemInfoFile && !readAgain) {
- return;
- }
-
- // Read the "/proc/meminfo" file
- BufferedReader in = null;
- FileReader fReader = null;
- try {
- fReader = new FileReader(procfsMemFile);
- in = new BufferedReader(fReader);
- } catch (FileNotFoundException f) {
- // shouldn't happen....
- return;
- }
-
- Matcher mat = null;
-
- try {
- String str = in.readLine();
- while (str != null) {
- mat = PROCFS_MEMFILE_FORMAT.matcher(str);
- if (mat.find()) {
- if (mat.group(1).equals(MEMTOTAL_STRING)) {
- ramSize = Long.parseLong(mat.group(2));
- } else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
- swapSize = Long.parseLong(mat.group(2));
- } else if (mat.group(1).equals(MEMFREE_STRING)) {
- ramSizeFree = Long.parseLong(mat.group(2));
- } else if (mat.group(1).equals(SWAPFREE_STRING)) {
- swapSizeFree = Long.parseLong(mat.group(2));
- } else if (mat.group(1).equals(INACTIVE_STRING)) {
- inactiveSize = Long.parseLong(mat.group(2));
- }
- }
- str = in.readLine();
- }
- } catch (IOException io) {
- LOG.warn("Error reading the stream " + io);
- } finally {
- // Close the streams
- try {
- fReader.close();
- try {
- in.close();
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + in);
- }
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + fReader);
- }
- }
-
- readMemInfoFile = true;
- }
-
- /**
- * Read /proc/cpuinfo, parse and calculate CPU information
- */
- private void readProcCpuInfoFile() {
- // This file needs to be read only once
- if (readCpuInfoFile) {
- return;
- }
- // Read "/proc/cpuinfo" file
- BufferedReader in = null;
- FileReader fReader = null;
- try {
- fReader = new FileReader(procfsCpuFile);
- in = new BufferedReader(fReader);
- } catch (FileNotFoundException f) {
- // shouldn't happen....
- return;
- }
- Matcher mat = null;
- try {
- numProcessors = 0;
- String str = in.readLine();
- while (str != null) {
- mat = PROCESSOR_FORMAT.matcher(str);
- if (mat.find()) {
- numProcessors++;
- }
- mat = FREQUENCY_FORMAT.matcher(str);
- if (mat.find()) {
- cpuFrequency = (long)(Double.parseDouble(mat.group(1)) * 1000); // kHz
- }
- str = in.readLine();
- }
- } catch (IOException io) {
- LOG.warn("Error reading the stream " + io);
- } finally {
- // Close the streams
- try {
- fReader.close();
- try {
- in.close();
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + in);
- }
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + fReader);
- }
- }
- readCpuInfoFile = true;
- }
-
- /**
- * Read /proc/stat file, parse and calculate cumulative CPU
- */
- private void readProcStatFile() {
- // Read "/proc/stat" file
- BufferedReader in = null;
- FileReader fReader = null;
- try {
- fReader = new FileReader(procfsStatFile);
- in = new BufferedReader(fReader);
- } catch (FileNotFoundException f) {
- // shouldn't happen....
- return;
- }
-
- Matcher mat = null;
- try {
- String str = in.readLine();
- while (str != null) {
- mat = CPU_TIME_FORMAT.matcher(str);
- if (mat.find()) {
- long uTime = Long.parseLong(mat.group(1));
- long nTime = Long.parseLong(mat.group(2));
- long sTime = Long.parseLong(mat.group(3));
- cumulativeCpuTime = uTime + nTime + sTime; // jiffies; converted to ms below
- break;
- }
- str = in.readLine();
- }
- cumulativeCpuTime *= jiffyLengthInMillis;
- } catch (IOException io) {
- LOG.warn("Error reading the stream " + io);
- } finally {
- // Close the streams
- try {
- fReader.close();
- try {
- in.close();
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + in);
- }
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + fReader);
- }
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public long getPhysicalMemorySize() {
- readProcMemInfoFile();
- return ramSize * 1024;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getVirtualMemorySize() {
- readProcMemInfoFile();
- return (ramSize + swapSize) * 1024;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getAvailablePhysicalMemorySize() {
- readProcMemInfoFile(true);
- return (ramSizeFree + inactiveSize) * 1024;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getAvailableVirtualMemorySize() {
- readProcMemInfoFile(true);
- return (ramSizeFree + swapSizeFree + inactiveSize) * 1024;
- }
-
- /** {@inheritDoc} */
- @Override
- public int getNumProcessors() {
- readProcCpuInfoFile();
- return numProcessors;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getCpuFrequency() {
- readProcCpuInfoFile();
- return cpuFrequency;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getCumulativeCpuTime() {
- readProcStatFile();
- return cumulativeCpuTime;
- }
-
- /** {@inheritDoc} */
- @Override
- public float getCpuUsage() {
- readProcStatFile();
- sampleTime = getCurrentTime();
- if (lastSampleTime == TaskTrackerStatus.UNAVAILABLE ||
- lastSampleTime > sampleTime) {
- // lastSampleTime > sampleTime may happen when the system time is changed
- lastSampleTime = sampleTime;
- lastCumulativeCpuTime = cumulativeCpuTime;
- return cpuUsage;
- }
- // When lastSampleTime is sufficiently old, update cpuUsage.
- // Also take a sample of the current time and cumulative CPU time for the
- // use of the next calculation.
- final long MINIMUM_UPDATE_INTERVAL = 10 * jiffyLengthInMillis;
- if (sampleTime > lastSampleTime + MINIMUM_UPDATE_INTERVAL) {
- cpuUsage = (float)(cumulativeCpuTime - lastCumulativeCpuTime) * 100F /
- ((float)(sampleTime - lastSampleTime) * getNumProcessors());
- lastSampleTime = sampleTime;
- lastCumulativeCpuTime = cumulativeCpuTime;
- }
- return cpuUsage;
- }
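
A worked example of the usage formula above: if cumulative CPU time advances by 2,000 ms over a 1,000 ms wall-clock window on a 4-processor machine, cpuUsage = 2000 * 100 / (1000 * 4) = 50, i.e. half of total machine capacity. Samples taken closer together than 10 jiffies simply return the previously computed value.
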
-
- /**
- * Test the {@link LinuxResourceCalculatorPlugin}
- *
- * @param args
- */
- public static void main(String[] args) {
- LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
- System.out.println("Physical memory Size (bytes) : "
- + plugin.getPhysicalMemorySize());
- System.out.println("Total Virtual memory Size (bytes) : "
- + plugin.getVirtualMemorySize());
- System.out.println("Available Physical memory Size (bytes) : "
- + plugin.getAvailablePhysicalMemorySize());
- System.out.println("Total Available Virtual memory Size (bytes) : "
- + plugin.getAvailableVirtualMemorySize());
- System.out.println("Number of Processors : " + plugin.getNumProcessors());
- System.out.println("CPU frequency (kHz) : " + plugin.getCpuFrequency());
- System.out.println("Cumulative CPU time (ms) : " +
- plugin.getCumulativeCpuTime());
- try {
- // Sleep so we can compute the CPU usage
- Thread.sleep(500L);
- } catch (InterruptedException e) {
- // do nothing
- }
- System.out.println("CPU usage % : " + plugin.getCpuUsage());
- }
-
- @Override
- public ProcResourceValues getProcResourceValues() {
- pTree = pTree.getProcessTree();
- long cpuTime = pTree.getCumulativeCpuTime();
- long pMem = pTree.getCumulativeRssmem();
- long vMem = pTree.getCumulativeVmem();
- return new ProcResourceValues(cpuTime, pMem, vMem);
- }
-}
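
A small self-contained check of the PROCFS_MEMFILE_FORMAT pattern used above; the sample line and the class name MemInfoRegexDemo are illustrative, not taken from the original code or a specific system.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MemInfoRegexDemo {
  public static void main(String[] args) {
    Pattern memLine = Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");
    // A typical /proc/meminfo line: key, colon, padding, value, " kB".
    Matcher m = memLine.matcher("MemTotal:        8115684 kB");
    if (m.find()) {
      System.out.println(m.group(1)); // prints: MemTotal
      System.out.println(m.group(2)); // prints: 8115684
    }
  }
}
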
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java b/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java
deleted file mode 100644
index 8c47cf3..0000000
--- a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Vector;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.mapred.JvmManager.JvmManagerForType;
-import org.apache.hadoop.mapred.JvmManager.JvmManagerForType.JvmRunner;
-import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.junit.After;
-import static org.junit.Assert.*;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestJvmManager {
- static final Log LOG = LogFactory.getLog(TestJvmManager.class);
-
- private static File TEST_DIR = new File(System.getProperty("test.build.data",
- "/tmp"), TestJvmManager.class.getSimpleName());
- private static int MAP_SLOTS = 10;
- private static int REDUCE_SLOTS = 10;
- private TaskTracker tt;
- private JvmManager jvmManager;
- private JobConf ttConf;
-
- @Before
- public void setUp() {
- TEST_DIR.mkdirs();
- }
-
- @After
- public void tearDown() throws IOException {
- FileUtil.fullyDelete(TEST_DIR);
- }
-
- public TestJvmManager() throws Exception {
- tt = new TaskTracker();
- ttConf = new JobConf();
- ttConf.setLong(TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL, 2000);
- tt.setConf(ttConf);
- tt.setMaxMapSlots(MAP_SLOTS);
- tt.setMaxReduceSlots(REDUCE_SLOTS);
- tt.setTaskController(new DefaultTaskController());
- jvmManager = new JvmManager(tt);
- tt.setJvmManagerInstance(jvmManager);
- }
-
- // write a shell script to execute the command.
- private File writeScript(String fileName, String cmd, File pidFile) throws IOException {
- File script = new File(TEST_DIR, fileName);
- FileOutputStream out = new FileOutputStream(script);
- // write pid into a file
- out.write(("echo $$ >" + pidFile.toString() + ";").getBytes());
- // ignore SIGTERM
- out.write(("trap '' 15\n").getBytes());
- // write the actual command itself.
- out.write(cmd.getBytes());
- out.close();
- script.setExecutable(true);
- return script;
- }
-
- /**
- * Tests the jvm kill from JvmRunner and JvmManager simultaneously.
- *
- * Starts a process, which sleeps for 60 seconds, in a thread.
- * Calls JvmRunner.kill() in a thread.
- * Also calls JvmManager.taskKilled().
- * Makes sure that the JVM is killed and that JvmManager can launch another
- * task properly.
- * @throws Exception
- */
- @Test
- public void testJvmKill() throws Exception {
- JvmManagerForType mapJvmManager = jvmManager
- .getJvmManagerForType(TaskType.MAP);
- // launch a jvm
- JobConf taskConf = new JobConf(ttConf);
- TaskAttemptID attemptID = new TaskAttemptID("test", 0, TaskType.MAP, 0, 0);
- Task task = new MapTask(null, attemptID, 0, null, 1);
- task.setConf(taskConf);
- TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
- File pidFile = new File(TEST_DIR, "pid");
- final TaskRunner taskRunner = task.createRunner(tt, tip);
- // launch a jvm which sleeps for 60 seconds
- final Vector<String> vargs = new Vector<String>(2);
- vargs.add(writeScript("SLEEP", "sleep 60\n", pidFile).getAbsolutePath());
- final File workDir = new File(TEST_DIR, "work");
- workDir.mkdir();
- final File stdout = new File(TEST_DIR, "stdout");
- final File stderr = new File(TEST_DIR, "stderr");
-
- // launch the process and wait in a thread, till it finishes
- Thread launcher = new Thread() {
- public void run() {
- try {
- taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100,
- workDir, null);
- } catch (InterruptedException e) {
- e.printStackTrace();
- return;
- }
- }
- };
- launcher.start();
- // wait till the jvm is launched
- // this loop waits for at most 1 second
- for (int i = 0; i < 10; i++) {
- if (pidFile.exists()) {
- break;
- }
- UtilsForTests.waitFor(100);
- }
- // assert that the process is launched
- assertTrue("pidFile is not present", pidFile.exists());
-
- // imitate Child code.
- // set pid in jvmManager
- BufferedReader in = new BufferedReader(new FileReader(pidFile));
- String pid = in.readLine();
- in.close();
- JVMId jvmid = mapJvmManager.runningTaskToJvm.get(taskRunner);
- jvmManager.setPidToJvm(jvmid, pid);
-
- // kill JvmRunner
- final JvmRunner jvmRunner = mapJvmManager.jvmIdToRunner.get(jvmid);
- Thread killer = new Thread() {
- public void run() {
- jvmRunner.kill();
- }
- };
- killer.start();
-
- // wait for a while so that the killer thread has started.
- Thread.sleep(100);
-
- // kill the jvm externally
- taskRunner.kill();
-
- assertTrue(jvmRunner.killed);
-
- // launch another jvm and see it finishes properly
- attemptID = new TaskAttemptID("test", 0, TaskType.MAP, 0, 1);
- task = new MapTask(null, attemptID, 0, null, 1);
- task.setConf(taskConf);
- tip = tt.new TaskInProgress(task, taskConf);
- TaskRunner taskRunner2 = task.createRunner(tt, tip);
- // build dummy vargs to call ls
- Vector<String> vargs2 = new Vector<String>(1);
- vargs2.add(writeScript("LS", "ls", pidFile).getAbsolutePath());
- File workDir2 = new File(TEST_DIR, "work2");
- workDir2.mkdir();
- File stdout2 = new File(TEST_DIR, "stdout2");
- File stderr2 = new File(TEST_DIR, "stderr2");
- taskRunner2.launchJvmAndWait(null, vargs2, stdout2, stderr2, 100, workDir2,
- null);
- // join all the threads
- killer.join();
- jvmRunner.join();
- launcher.join();
- }
-
-
- /**
- * Create a bunch of tasks and use a special hash map to detect
- * racy access to the various internal data structures of JvmManager.
- * (Regression test for MAPREDUCE-2224)
- */
- @Test
- public void testForRaces() throws Exception {
- JvmManagerForType mapJvmManager = jvmManager
- .getJvmManagerForType(TaskType.MAP);
-
- // Sub out the HashMaps for maps that will detect racy access.
- mapJvmManager.jvmToRunningTask = new RaceHashMap<JVMId, TaskRunner>();
- mapJvmManager.runningTaskToJvm = new RaceHashMap<TaskRunner, JVMId>();
- mapJvmManager.jvmIdToRunner = new RaceHashMap<JVMId, JvmRunner>();
-
- // Launch a bunch of JVMs, but only allow MAP_SLOTS to run at once.
- final ExecutorService exec = Executors.newFixedThreadPool(MAP_SLOTS);
- final AtomicReference<Throwable> failed =
- new AtomicReference<Throwable>();
-
- for (int i = 0; i < MAP_SLOTS*5; i++) {
- JobConf taskConf = new JobConf(ttConf);
- TaskAttemptID attemptID = new TaskAttemptID("test", 0, TaskType.MAP, i, 0);
- Task task = new MapTask(null, attemptID, i, null, 1);
- task.setConf(taskConf);
- TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
- File pidFile = new File(TEST_DIR, "pid_" + i);
- final TaskRunner taskRunner = task.createRunner(tt, tip);
- // launch a jvm which sleeps for 60 seconds
- final Vector<String> vargs = new Vector<String>(2);
- vargs.add(writeScript("script_" + i, "echo hi\n", pidFile).getAbsolutePath());
- final File workDir = new File(TEST_DIR, "work_" + i);
- workDir.mkdir();
- final File stdout = new File(TEST_DIR, "stdout_" + i);
- final File stderr = new File(TEST_DIR, "stderr_" + i);
-
- // launch the process and wait in a thread, till it finishes
- Runnable launcher = new Runnable() {
- public void run() {
- try {
- taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100,
- workDir, null);
- } catch (Throwable t) {
- failed.compareAndSet(null, t);
- exec.shutdownNow();
- return;
- }
- }
- };
- exec.submit(launcher);
- }
-
- exec.shutdown();
- exec.awaitTermination(3, TimeUnit.MINUTES);
- if (failed.get() != null) {
- throw new RuntimeException(failed.get());
- }
- }
-
- /**
- * HashMap which detects racy usage by sleeping during operations
- * and checking that no other threads access the map while asleep.
- */
- static class RaceHashMap<K,V> extends HashMap<K,V> {
- Object syncData = new Object();
- RuntimeException userStack = null;
- boolean raced = false;
-
- private void checkInUse() {
- synchronized (syncData) {
- RuntimeException thisStack = new RuntimeException(Thread.currentThread().toString());
-
- if (userStack != null && !raced) {
- RuntimeException other = userStack;
- raced = true;
- LOG.fatal("Race between two threads.");
- LOG.fatal("First", thisStack);
- LOG.fatal("Second", other);
- throw new RuntimeException("Raced");
- } else {
- userStack = thisStack;
- }
- }
- }
-
- private void sleepABit() {
- try {
- Thread.sleep(60);
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- }
- }
-
- private void done() {
- synchronized (syncData) {
- userStack = null;
- }
- }
-
- @Override
- public V get(Object key) {
- checkInUse();
- try {
- sleepABit();
- return super.get(key);
- } finally {
- done();
- }
- }
-
- @Override
- public boolean containsKey(Object key) {
- checkInUse();
- try {
- sleepABit();
- return super.containsKey(key);
- } finally {
- done();
- }
- }
-
- @Override
- public V put(K key, V val) {
- checkInUse();
- try {
- sleepABit();
- return super.put(key, val);
- } finally {
- done();
- }
- }
- }
-
-}
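
The RaceHashMap above works by publishing a marker on entry, lingering so that overlapping access is likely to be observed, and failing when a second thread publishes while the first is still inside. A minimal sketch of the same detection idea, reduced to a single guarded section; RaceDetector and its method names are illustrative, and it throws in the second thread rather than logging both stacks as the test does.

import java.util.concurrent.atomic.AtomicReference;

public class RaceDetector {
  private final AtomicReference<Thread> owner = new AtomicReference<>();

  // Mark entry into the guarded section; fail if another thread is inside.
  public void enter() throws InterruptedException {
    Thread prev = owner.getAndSet(Thread.currentThread());
    if (prev != null && prev != Thread.currentThread()) {
      throw new IllegalStateException("Raced with " + prev.getName());
    }
    Thread.sleep(60); // widen the window so overlapping access is caught
  }

  public void exit() {
    owner.set(null);
  }
}
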
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java b/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java
deleted file mode 100644
index 96ed9fa..0000000
--- a/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java
+++ /dev/null
@@ -1,1075 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.LinkedHashMap;
-import java.util.TreeMap;
-import java.util.jar.JarOutputStream;
-import java.util.zip.ZipEntry;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.LocalDirAllocator;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.security.TokenCache;
-import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
-import org.apache.hadoop.mapreduce.util.MRAsyncDiskService;
-
-import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.mapred.JvmManager.JvmEnv;
-import org.apache.hadoop.mapred.TaskController.JobInitializationContext;
-import org.apache.hadoop.mapred.TaskController.TaskControllerContext;
-import org.apache.hadoop.mapred.TaskTracker.RunningJob;
-import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
-import org.apache.hadoop.mapred.UtilsForTests.InlineCleanupQueue;
-
-import junit.framework.TestCase;
-
-/**
- * Test to verify localization of a job and localization of a task on a
- * TaskTracker.
- *
- */
-public class TestTaskTrackerLocalization extends TestCase {
-
- private static File TEST_ROOT_DIR =
- new File(System.getProperty("test.build.data", "/tmp"));
- private File ROOT_MAPRED_LOCAL_DIR;
- private File HADOOP_LOG_DIR;
- private static File PERMISSION_SCRIPT_DIR;
- private static File PERMISSION_SCRIPT_FILE;
- private static final String PERMISSION_SCRIPT_CONTENT = "ls -l -d $1 | " +
- "awk '{print $1\":\"$3\":\"$4}'";
-
- private int numLocalDirs = 6;
- private static final Log LOG =
- LogFactory.getLog(TestTaskTrackerLocalization.class);
-
- protected TaskTracker tracker;
- protected UserGroupInformation taskTrackerUGI;
- protected TaskController taskController;
- protected JobConf trackerFConf;
- private JobConf localizedJobConf;
- protected JobID jobId;
- protected TaskAttemptID taskId;
- protected Task task;
- protected String[] localDirs;
- protected static LocalDirAllocator lDirAlloc =
- new LocalDirAllocator(MRConfig.LOCAL_DIR);
- protected Path attemptWorkDir;
- protected File[] attemptLogFiles;
- protected JobConf localizedTaskConf;
- private TaskInProgress tip;
- private JobConf jobConf;
- private File jobConfFile;
-
- /**
- * Dummy method in this base class. Only derived classes will define this
- * method for checking if a test can be run.
- */
- protected boolean canRun() {
- return true;
- }
-
- @Override
- protected void setUp()
- throws Exception {
- if (!canRun()) {
- return;
- }
- TEST_ROOT_DIR =
- new File(System.getProperty("test.build.data", "/tmp"), getClass()
- .getSimpleName());
- if (!TEST_ROOT_DIR.exists()) {
- TEST_ROOT_DIR.mkdirs();
- }
-
- ROOT_MAPRED_LOCAL_DIR = new File(TEST_ROOT_DIR, "mapred/local");
- ROOT_MAPRED_LOCAL_DIR.mkdirs();
-
- HADOOP_LOG_DIR = new File(TEST_ROOT_DIR, "logs");
- HADOOP_LOG_DIR.mkdir();
- System.setProperty("hadoop.log.dir", HADOOP_LOG_DIR.getAbsolutePath());
-
- trackerFConf = new JobConf();
-
- trackerFConf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
- localDirs = new String[numLocalDirs];
- for (int i = 0; i < numLocalDirs; i++) {
- localDirs[i] = new File(ROOT_MAPRED_LOCAL_DIR, "0_" + i).getPath();
- }
- trackerFConf.setStrings(MRConfig.LOCAL_DIR, localDirs);
- trackerFConf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
-
- // Create the job configuration file. Same as trackerConf in this test.
- jobConf = new JobConf(trackerFConf);
- // Set job view ACLs in conf so that the contents of jobACLsFile can be
- // validated against this value. Include both users and groups.
- String jobViewACLs = "user1,user2, group1,group2";
- jobConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, jobViewACLs);
-
- jobConf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 0);
- jobConf.setUser(getJobOwner().getShortUserName());
-
- String queue = "default";
- // set job queue name in job conf
- jobConf.setQueueName(queue);
- // Set queue admins acl in job conf similar to what JobClient does so that
- // it goes into job conf also.
- jobConf.set(toFullPropertyName(queue,
- QueueACL.ADMINISTER_JOBS.getAclName()),
- "qAdmin1,qAdmin2 qAdminsGroup1,qAdminsGroup2");
-
- Job job = Job.getInstance(jobConf);
- String jtIdentifier = "200907202331";
- jobId = new JobID(jtIdentifier, 1);
-
- // JobClient uploads the job jar to the file system and sets it in the
- // jobConf.
- uploadJobJar(job);
-
- // JobClient uploads the jobConf to the file system.
- jobConfFile = uploadJobConf(job.getConfiguration());
-
- // create jobTokens file
- uploadJobTokensFile();
-
- taskTrackerUGI = UserGroupInformation.getCurrentUser();
- startTracker();
-
- // Set up the task to be localized
- taskId =
- new TaskAttemptID(jtIdentifier, jobId.getId(), TaskType.MAP, 1, 0);
- createTask();
-
- // mimic register task
- // create the tip
- tip = tracker.new TaskInProgress(task, trackerFConf);
- }
-
- private void startTracker() throws IOException {
- // Set up the TaskTracker
- tracker = new TaskTracker();
- tracker.setConf(trackerFConf);
- tracker.setTaskLogCleanupThread(new UserLogCleaner(trackerFConf));
- initializeTracker();
- }
-
- private void initializeTracker() throws IOException {
- tracker.setIndexCache(new IndexCache(trackerFConf));
- tracker.setTaskMemoryManagerEnabledFlag();
-
- // for test case system FS is the local FS
- tracker.systemFS = FileSystem.getLocal(trackerFConf);
- tracker.setLocalFileSystem(tracker.systemFS);
- tracker.systemDirectory = new Path(TEST_ROOT_DIR.getAbsolutePath());
-
- tracker.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
- tracker.runningJobs = new TreeMap<JobID, RunningJob>();
- tracker.setAsyncDiskService(new MRAsyncDiskService(trackerFConf));
- tracker.getAsyncDiskService().cleanupAllVolumes();
-
- // Set up TaskTracker instrumentation
- tracker.setTaskTrackerInstrumentation(
- TaskTracker.createInstrumentation(tracker, trackerFConf));
-
- // setup task controller
- taskController = createTaskController();
- taskController.setConf(trackerFConf);
- taskController.setup();
- tracker.setTaskController(taskController);
- tracker.setLocalizer(new Localizer(tracker.getLocalFileSystem(), localDirs,
- taskController));
- }
-
- protected TaskController createTaskController() {
- return new DefaultTaskController();
- }
-
- private void createTask()
- throws IOException {
- task = new MapTask(jobConfFile.toURI().toString(), taskId, 1, null, 1);
- task.setConf(jobConf); // Set conf. Set user name in particular.
- task.setUser(jobConf.getUser());
- }
-
- protected UserGroupInformation getJobOwner() throws IOException {
- return UserGroupInformation.getCurrentUser();
- }
-
- /**
- * Static block setting up the permission script used by
- * checkFilePermissions.
- */
- static {
- PERMISSION_SCRIPT_DIR = new File(TEST_ROOT_DIR, "permission_script_dir");
- PERMISSION_SCRIPT_FILE = new File(PERMISSION_SCRIPT_DIR, "getperms.sh");
-
- if(PERMISSION_SCRIPT_FILE.exists()) {
- PERMISSION_SCRIPT_FILE.delete();
- }
-
- if(PERMISSION_SCRIPT_DIR.exists()) {
- PERMISSION_SCRIPT_DIR.delete();
- }
-
- PERMISSION_SCRIPT_DIR.mkdir();
-
- try {
- PrintWriter writer = new PrintWriter(PERMISSION_SCRIPT_FILE);
- writer.write(PERMISSION_SCRIPT_CONTENT);
- writer.close();
- } catch (FileNotFoundException fe) {
- fail();
- }
- PERMISSION_SCRIPT_FILE.setExecutable(true, true);
- }
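
For reference, the permission script pipes ls -l -d through awk to print the mode, owner and group joined by colons; for a directory owned by user alice in group users with mode 700 (names illustrative) it would emit drwx------:alice:users, which is exactly the three-field format that getFilePermissionAttrs later splits on ":|\n".
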
-
- /**
- * @param job
- * @throws IOException
- * @throws FileNotFoundException
- */
- private void uploadJobJar(Job job)
- throws IOException,
- FileNotFoundException {
- File jobJarFile = new File(TEST_ROOT_DIR, "jobjar-on-dfs.jar");
- JarOutputStream jstream =
- new JarOutputStream(new FileOutputStream(jobJarFile));
- ZipEntry ze = new ZipEntry("lib/lib1.jar");
- jstream.putNextEntry(ze);
- jstream.closeEntry();
- ze = new ZipEntry("lib/lib2.jar");
- jstream.putNextEntry(ze);
- jstream.closeEntry();
- jstream.finish();
- jstream.close();
- job.setJar(jobJarFile.toURI().toString());
- }
-
- /**
- * @param conf
- * @return
- * @throws FileNotFoundException
- * @throws IOException
- */
- protected File uploadJobConf(Configuration conf)
- throws FileNotFoundException,
- IOException {
- File jobConfFile = new File(TEST_ROOT_DIR, "jobconf-on-dfs.xml");
- FileOutputStream out = new FileOutputStream(jobConfFile);
- conf.writeXml(out);
- out.close();
- return jobConfFile;
- }
-
- /**
- * create fake JobTokens file
- * @return
- * @throws IOException
- */
- protected void uploadJobTokensFile() throws IOException {
-
- File dir = new File(TEST_ROOT_DIR, jobId.toString());
- if(!dir.exists())
- assertTrue("failed to create dir=" + dir.getAbsolutePath(), dir.mkdirs());
- // writing empty file, we don't need the keys for this test
- new Credentials().writeTokenStorageFile(new Path("file:///" + dir,
- TokenCache.JOB_TOKEN_HDFS_FILE), new Configuration());
- }
-
- @Override
- protected void tearDown()
- throws Exception {
- if (!canRun()) {
- return;
- }
- FileUtil.fullyDelete(TEST_ROOT_DIR);
- }
-
- protected static String[] getFilePermissionAttrs(String path)
- throws IOException {
- String[] command = {"bash",PERMISSION_SCRIPT_FILE.getAbsolutePath(), path};
- String output=Shell.execCommand(command);
- return output.split(":|\n");
- }
-
-
- /**
- * Utility method to check the permissions of a given path. Requires the
- * permission script directory to be set up before calling.
- *
- *
- * @param path
- * @param expectedPermissions
- * @param expectedOwnerUser
- * @param expectedOwnerGroup
- * @throws IOException
- */
- static void checkFilePermissions(String path, String expectedPermissions,
- String expectedOwnerUser, String expectedOwnerGroup)
- throws IOException {
- String[] attrs = getFilePermissionAttrs(path);
- assertTrue("File attrs length is not 3 but " + attrs.length,
- attrs.length == 3);
- assertTrue("Path " + path + " has the permissions " + attrs[0]
- + " instead of the expected " + expectedPermissions, attrs[0]
- .equals(expectedPermissions));
- assertTrue("Path " + path + " is user owned not by " + expectedOwnerUser
- + " but by " + attrs[1], attrs[1].equals(expectedOwnerUser));
- assertTrue("Path " + path + " is group owned not by " + expectedOwnerGroup
- + " but by " + attrs[2], attrs[2].equals(expectedOwnerGroup));
- }
-
- /**
- * Verify the task-controller's setup functionality
- *
- * @throws IOException
- */
- public void testTaskControllerSetup()
- throws IOException {
- if (!canRun()) {
- return;
- }
- // Task-controller is already set up in the test's setup method. Now verify.
- for (String localDir : localDirs) {
-
- // Verify the local-dir itself.
- File lDir = new File(localDir);
- assertTrue("localDir " + lDir + " doesn't exist!", lDir.exists());
- checkFilePermissions(lDir.getAbsolutePath(), "drwxr-xr-x", task
- .getUser(), taskTrackerUGI.getGroupNames()[0]);
- }
-
- // Verify the permissions on the userlogs dir
- File taskLog = TaskLog.getUserLogDir();
- checkFilePermissions(taskLog.getAbsolutePath(), "drwxr-xr-x", task
- .getUser(), taskTrackerUGI.getGroupNames()[0]);
- }
-
- /**
- * Test the localization of a user on the TT.
- *
- * @throws IOException
- */
- public void testUserLocalization()
- throws IOException {
- if (!canRun()) {
- return;
- }
- // /////////// The main method being tested
- tracker.getLocalizer().initializeUserDirs(task.getUser());
- // ///////////
-
- // Check the directory structure and permissions
- checkUserLocalization();
-
- // For the sake of testing re-entrancy of initializeUserDirs(), we remove
- // the user directories now and make sure that further calls of the method
- // don't create directories any more.
- for (String dir : localDirs) {
- File userDir = new File(dir, TaskTracker.getUserDir(task.getUser()));
- if (!FileUtil.fullyDelete(userDir)) {
- throw new IOException("Unable to delete " + userDir);
- }
- }
-
- // Now call the method again.
- tracker.getLocalizer().initializeUserDirs(task.getUser());
-
- // Files should not be created now and so shouldn't be there anymore.
- for (String dir : localDirs) {
- File userDir = new File(dir, TaskTracker.getUserDir(task.getUser()));
- assertFalse("Unexpectedly, user-dir " + userDir.getAbsolutePath()
- + " exists!", userDir.exists());
- }
- }
-
- protected void checkUserLocalization()
- throws IOException {
- for (String dir : localDirs) {
-
- File localDir = new File(dir);
- assertTrue(MRConfig.LOCAL_DIR + localDir + " isn't created!",
- localDir.exists());
-
- File taskTrackerSubDir = new File(localDir, TaskTracker.SUBDIR);
- assertTrue("taskTracker sub-dir in the local-dir " + localDir
- + " is not created!", taskTrackerSubDir.exists());
-
- File userDir = new File(taskTrackerSubDir, task.getUser());
- assertTrue("user-dir in taskTrackerSubdir " + taskTrackerSubDir
- + " is not created!", userDir.exists());
- checkFilePermissions(userDir.getAbsolutePath(), "drwx------", task
- .getUser(), taskTrackerUGI.getGroupNames()[0]);
-
- File jobCache = new File(userDir, TaskTracker.JOBCACHE);
- assertTrue("jobcache in the userDir " + userDir + " isn't created!",
- jobCache.exists());
- checkFilePermissions(jobCache.getAbsolutePath(), "drwx------", task
- .getUser(), taskTrackerUGI.getGroupNames()[0]);
-
- // Verify the distributed cache dir.
- File distributedCacheDir =
- new File(localDir, TaskTracker
- .getPrivateDistributedCacheDir(task.getUser()));
- assertTrue("distributed cache dir " + distributedCacheDir
- + " doesn't exist!", distributedCacheDir.exists());
- checkFilePermissions(distributedCacheDir.getAbsolutePath(),
- "drwx------", task.getUser(), taskTrackerUGI.getGroupNames()[0]);
- }
- }
-
- /**
- * Test job localization on a TT. Tests localization of job.xml, job.jar and
- * corresponding setting of configuration. Also test
- * {@link TaskController#initializeJob(JobInitializationContext)}
- *
- * @throws IOException
- */
- public void testJobLocalization()
- throws Exception {
- if (!canRun()) {
- return;
- }
- TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
- localizedJobConf = rjob.getJobConf();
-
- checkJobLocalization();
- }
-
- /**
- * Test that, if the job log dir can't be created, the job will fail
- * during localization rather than at the time when the task itself
- * tries to write into it.
- */
- public void testJobLocalizationFailsIfLogDirUnwritable()
- throws Exception {
- if (!canRun()) {
- return;
- }
-
- File logDir = TaskLog.getJobDir(jobId);
- File logDirParent = logDir.getParentFile();
-
- try {
- assertTrue(logDirParent.mkdirs() || logDirParent.isDirectory());
- FileUtil.fullyDelete(logDir);
- FileUtil.chmod(logDirParent.getAbsolutePath(), "000");
-
- tracker.localizeJob(tip);
- fail("No exception");
- } catch (IOException ioe) {
- LOG.info("Got exception", ioe);
- assertTrue(ioe.getMessage().contains("Could not create job user log"));
- } finally {
- // Put it back just to be safe
- FileUtil.chmod(logDirParent.getAbsolutePath(), "755");
- }
- }
-
- protected void checkJobLocalization()
- throws IOException {
- // Check the directory structure
- for (String dir : localDirs) {
-
- File localDir = new File(dir);
- File taskTrackerSubDir = new File(localDir, TaskTracker.SUBDIR);
- File userDir = new File(taskTrackerSubDir, task.getUser());
- File jobCache = new File(userDir, TaskTracker.JOBCACHE);
-
- File jobDir = new File(jobCache, jobId.toString());
- assertTrue("job-dir in " + jobCache + " isn't created!", jobDir.exists());
-
- // check the private permissions on the job directory
- checkFilePermissions(jobDir.getAbsolutePath(), "drwx------", task
- .getUser(), taskTrackerUGI.getGroupNames()[0]);
- }
-
- // check the localization of job.xml
- assertTrue("job.xml is not localized on this TaskTracker!!", lDirAlloc
- .getLocalPathToRead(TaskTracker.getLocalJobConfFile(task.getUser(),
- jobId.toString()), trackerFConf) != null);
-
- // check the localization of job.jar
- Path jarFileLocalized =
- lDirAlloc.getLocalPathToRead(TaskTracker.getJobJarFile(task.getUser(),
- jobId.toString()), trackerFConf);
- assertTrue("job.jar is not localized on this TaskTracker!!",
- jarFileLocalized != null);
- assertTrue("lib/lib1.jar is not unjarred on this TaskTracker!!", new File(
- jarFileLocalized.getParent() + Path.SEPARATOR + "lib/lib1.jar")
- .exists());
- assertTrue("lib/lib2.jar is not unjarred on this TaskTracker!!", new File(
- jarFileLocalized.getParent() + Path.SEPARATOR + "lib/lib2.jar")
- .exists());
-
- // check the creation of job work directory
- assertTrue("job-work dir is not created on this TaskTracker!!", lDirAlloc
- .getLocalPathToRead(TaskTracker.getJobWorkDir(task.getUser(), jobId
- .toString()), trackerFConf) != null);
-
- // Check the setting of mapreduce.job.local.dir and job.jar which will eventually be
- // used by the user's task
- boolean jobLocalDirFlag = false, mapredJarFlag = false;
- String localizedJobLocalDir =
- localizedJobConf.get(TaskTracker.JOB_LOCAL_DIR);
- String localizedJobJar = localizedJobConf.getJar();
- for (String localDir : localizedJobConf.getStrings(MRConfig.LOCAL_DIR)) {
- if (localizedJobLocalDir.equals(localDir + Path.SEPARATOR
- + TaskTracker.getJobWorkDir(task.getUser(), jobId.toString()))) {
- jobLocalDirFlag = true;
- }
- if (localizedJobJar.equals(localDir + Path.SEPARATOR
- + TaskTracker.getJobJarFile(task.getUser(), jobId.toString()))) {
- mapredJarFlag = true;
- }
- }
- assertTrue(TaskTracker.JOB_LOCAL_DIR
- + " is not set properly to the target user's directory: "
- + localizedJobLocalDir, jobLocalDirFlag);
- assertTrue(
- "mapreduce.job.jar is not set properly to the target user's directory: "
- + localizedJobJar, mapredJarFlag);
-
- // check job user-log directory permissions
- File jobLogDir = TaskLog.getJobDir(jobId);
- assertTrue("job log directory " + jobLogDir + " does not exist!", jobLogDir
- .exists());
- checkFilePermissions(jobLogDir.toString(), "drwx------", task.getUser(),
- taskTrackerUGI.getGroupNames()[0]);
-
- // Make sure that the job ACLs file job-acls.xml exists in job userlog dir
- File jobACLsFile = new File(jobLogDir, TaskTracker.jobACLsFile);
- assertTrue("JobACLsFile is missing in the job userlog dir " + jobLogDir,
- jobACLsFile.exists());
-
- // With default task controller, the job-acls.xml file is owned by TT and
- // permissions are 700
- checkFilePermissions(jobACLsFile.getAbsolutePath(), "-rw-------",
- taskTrackerUGI.getShortUserName(), taskTrackerUGI.getGroupNames()[0]);
-
- validateJobACLsFileContent();
- }
-
- // Validate the contents of jobACLsFile (i.e. user name, job-view-acl,
- // queue name and queue-admins-acl).
- protected void validateJobACLsFileContent() {
- JobConf jobACLsConf = TaskLogServlet.getConfFromJobACLsFile(jobId);
-
- assertTrue(jobACLsConf.get("user.name").equals(
- localizedJobConf.getUser()));
- assertTrue(jobACLsConf.get(MRJobConfig.JOB_ACL_VIEW_JOB).
- equals(localizedJobConf.get(MRJobConfig.JOB_ACL_VIEW_JOB)));
-
- String queue = localizedJobConf.getQueueName();
- assertTrue(queue.equalsIgnoreCase(jobACLsConf.getQueueName()));
-
- String qACLName = toFullPropertyName(queue,
- QueueACL.ADMINISTER_JOBS.getAclName());
- assertTrue(jobACLsConf.get(qACLName).equals(
- localizedJobConf.get(qACLName)));
- }
-
- /**
- * Test task localization on a TT.
- *
- * @throws IOException
- */
- public void testTaskLocalization()
- throws Exception {
- if (!canRun()) {
- return;
- }
- TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
- localizedJobConf = rjob.getJobConf();
- initializeTask();
-
- checkTaskLocalization();
- }
-
- private void initializeTask() throws IOException {
- tip.setJobConf(localizedJobConf);
-
- // ////////// The central method being tested
- tip.localizeTask(task);
- // //////////
-
- // check the functionality of localizeTask
- for (String dir : trackerFConf.getStrings(MRConfig.LOCAL_DIR)) {
- File attemptDir =
- new File(dir, TaskTracker.getLocalTaskDir(task.getUser(), jobId
- .toString(), taskId.toString(), task.isTaskCleanupTask()));
- assertTrue("attempt-dir " + attemptDir + " in localDir " + dir
- + " is not created!!", attemptDir.exists());
- }
-
- attemptWorkDir =
- lDirAlloc.getLocalPathToRead(TaskTracker.getTaskWorkDir(
- task.getUser(), task.getJobID().toString(), task.getTaskID()
- .toString(), task.isTaskCleanupTask()), trackerFConf);
- assertTrue("attempt work dir for " + taskId.toString()
- + " is not created in any of the configured dirs!!",
- attemptWorkDir != null);
-
- TaskRunner runner = task.createRunner(tracker, tip);
- tip.setTaskRunner(runner);
-
- // /////// Few more methods being tested
- runner.setupChildTaskConfiguration(lDirAlloc);
- TaskRunner.createChildTmpDir(new File(attemptWorkDir.toUri().getPath()),
- localizedJobConf);
- attemptLogFiles = runner.prepareLogFiles(task.getTaskID(),
- task.isTaskCleanupTask());
-
- // Make sure the task-conf file is created
- Path localTaskFile =
- lDirAlloc.getLocalPathToRead(TaskTracker.getTaskConfFile(task
- .getUser(), task.getJobID().toString(), task.getTaskID()
- .toString(), task.isTaskCleanupTask()), trackerFConf);
- assertTrue("Task conf file " + localTaskFile.toString()
- + " is not created!!", new File(localTaskFile.toUri().getPath())
- .exists());
-
- // /////// One more method being tested. This happens in child space.
- localizedTaskConf = new JobConf(localTaskFile);
- TaskRunner.setupChildMapredLocalDirs(task, localizedTaskConf);
- // ///////
-
- // Initialize task via TaskController
- TaskControllerContext taskContext =
- new TaskController.TaskControllerContext();
- taskContext.env =
- new JvmEnv(null, null, null, null, -1, new File(localizedJobConf
- .get(TaskTracker.JOB_LOCAL_DIR)), null, localizedJobConf);
- taskContext.task = task;
- // /////////// The method being tested
- taskController.initializeTask(taskContext);
- // ///////////
- }
-
- protected void checkTaskLocalization()
- throws IOException {
- // Make sure that the mapreduce.cluster.local.dir is sandboxed
- for (String childMapredLocalDir : localizedTaskConf
- .getStrings(MRConfig.LOCAL_DIR)) {
- assertTrue("Local dir " + childMapredLocalDir + " is not sandboxed !!",
- childMapredLocalDir.endsWith(TaskTracker.getLocalTaskDir(task
- .getUser(), jobId.toString(), taskId.toString(),
- task.isTaskCleanupTask())));
- }
-
- // Make sure task.getJobFile now points to the localized task conf file.
- assertTrue(task.getJobFile().endsWith(
- TaskTracker.getTaskConfFile(task.getUser(), jobId.toString(), taskId
- .toString(), task.isTaskCleanupTask())));
-
- // Make sure that the tmp directories are created
- assertTrue("tmp dir is not created in workDir "
- + attemptWorkDir.toUri().getPath(), new File(attemptWorkDir.toUri()
- .getPath(), "tmp").exists());
-
- // Make sure that the logs are setup properly
- File logDir = TaskLog.getAttemptDir(taskId, task.isTaskCleanupTask());
- assertTrue("task's log dir " + logDir.toString() + " doesn't exist!",
- logDir.exists());
- checkFilePermissions(logDir.getAbsolutePath(), "drwx------", task
- .getUser(), taskTrackerUGI.getGroupNames()[0]);
-
- File expectedStdout = new File(logDir, TaskLog.LogName.STDOUT.toString());
- assertTrue("stdout log file is improper. Expected : "
- + expectedStdout.toString() + " Observed : "
- + attemptLogFiles[0].toString(), expectedStdout.toString().equals(
- attemptLogFiles[0].toString()));
- File expectedStderr =
- new File(logDir, TaskLog.LogName.STDERR.toString());
- assertTrue("stderr log file is improper. Expected : "
- + expectedStderr.toString() + " Observed : "
- + attemptLogFiles[1].toString(), expectedStderr.toString().equals(
- attemptLogFiles[1].toString()));
- }
-
- /**
- * Create a file in the given dir and set permissions r-xr-xr-x so that no
- * one can delete it directly (without first doing chmod).
- * Creates dir/subDir and dir/subDir/file.
- */
- static void createFileAndSetPermissions(JobConf jobConf, Path dir)
- throws IOException {
- Path subDir = new Path(dir, "subDir");
- FileSystem fs = FileSystem.getLocal(jobConf);
- fs.mkdirs(subDir);
- Path p = new Path(subDir, "file");
- java.io.DataOutputStream out = fs.create(p);
- out.writeBytes("dummy input");
- out.close();
- // no write permission for subDir and subDir/file
- try {
- int ret = FileUtil.chmod(subDir.toUri().getPath(), "a=rx", true);
- if (ret != 0) {
- LOG.warn("chmod failed for " + subDir + "; retVal=" + ret);
- }
- } catch(InterruptedException e) {
- LOG.warn("Interrupted while doing chmod for " + subDir);
- }
- }
-
- /**
- * Validates the removal of $taskid and $taskid/work under mapred-local-dir
- * in cases where those directories cannot be deleted without adding
- * write permission to the newly created directories under $taskid and
- * $taskid/work
- * Also see createFileAndSetPermissions for details
- */
- void validateRemoveTaskFiles(boolean needCleanup, boolean jvmReuse,
- TaskInProgress tip) throws IOException {
- // create files and set permissions 555. Verify if task controller sets
- // the permissions for TT to delete the taskDir or workDir
- String dir = (!needCleanup || jvmReuse) ?
- TaskTracker.getTaskWorkDir(task.getUser(), task.getJobID().toString(),
- taskId.toString(), task.isTaskCleanupTask())
- : TaskTracker.getLocalTaskDir(task.getUser(), task.getJobID().toString(),
- taskId.toString(), task.isTaskCleanupTask());
-
- Path[] paths = tracker.getLocalFiles(localizedJobConf, dir);
- assertTrue("No paths found", paths.length > 0);
- for (Path p : paths) {
- if (tracker.getLocalFileSystem().exists(p)) {
- createFileAndSetPermissions(localizedJobConf, p);
- }
- }
-
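- // Use an inline cleanup queue so deletions happen synchronously and stale paths are recorded.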
- InlineCleanupQueue cleanupQueue = new InlineCleanupQueue();
- tracker.setCleanupThread(cleanupQueue);
-
- tip.removeTaskFiles(needCleanup, taskId);
-
- if (jvmReuse) {
- // work dir should still exist and cleanup queue should be empty
- assertTrue("cleanup queue is not empty after removeTaskFiles() in case "
- + "of jvm reuse.", cleanupQueue.isQueueEmpty());
- boolean workDirExists = false;
- for (Path p : paths) {
- if (tracker.getLocalFileSystem().exists(p)) {
- workDirExists = true;
- }
- }
- assertTrue("work dir does not exist in case of jvm reuse", workDirExists);
-
- // now try to delete the work dir and verify that there are no stale paths
- JvmManager.deleteWorkDir(tracker, task);
- }
-
- assertTrue("Some task files are not deleted!! Number of stale paths is "
- + cleanupQueue.stalePaths.size(), cleanupQueue.stalePaths.size() == 0);
- }
-
- /**
- * Validates if task cleanup is done properly for a succeeded task
- * @throws IOException
- */
- public void testTaskFilesRemoval()
- throws Exception {
- if (!canRun()) {
- return;
- }
- testTaskFilesRemoval(false, false);// no needCleanup; no jvmReuse
- }
-
- /**
- * Validates if task cleanup is done properly for a task that did not succeed
- * @throws IOException
- */
- public void testFailedTaskFilesRemoval()
- throws Exception {
- if (!canRun()) {
- return;
- }
- testTaskFilesRemoval(true, false);// needCleanup; no jvmReuse
-
- // initialize a cleanupAttempt for the task.
- task.setTaskCleanupTask();
- // localize task cleanup attempt
- initializeTask();
- checkTaskLocalization();
-
- // verify the cleanup of cleanup attempt.
- testTaskFilesRemoval(true, false);// needCleanup; no jvmReuse
- }
-
- /**
- * Validates if task cleanup is done properly for a succeeded task when JVM reuse is enabled
- * @throws IOException
- */
- public void testTaskFilesRemovalWithJvmUse()
- throws Exception {
- if (!canRun()) {
- return;
- }
- testTaskFilesRemoval(false, true);// no needCleanup; jvmReuse
- }
-
- /**
- * Validates if task cleanup is done properly
- */
- private void testTaskFilesRemoval(boolean needCleanup, boolean jvmReuse)
- throws Exception {
- // Localize job and localize task.
- TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
- localizedJobConf = rjob.getJobConf();
- if (jvmReuse) {
- localizedJobConf.setNumTasksToExecutePerJvm(2);
- }
- initializeTask();
-
- // TODO: Let the task run and create files.
-
- // create files and set permissions 555. Verify if task controller sets
- // the permissions for TT to delete the task dir or work dir properly
- validateRemoveTaskFiles(needCleanup, jvmReuse, tip);
- }
-
- /**
- * Test userlogs cleanup.
- *
- * @throws IOException
- */
- private void verifyUserLogsRemoval()
- throws IOException {
- // verify user logs cleanup
- File jobUserLogDir = TaskLog.getJobDir(jobId);
- // Logs should be there before cleanup.
- assertTrue("Userlogs dir " + jobUserLogDir + " is not present as expected!!",
- jobUserLogDir.exists());
- tracker.purgeJob(new KillJobAction(jobId));
- tracker.getTaskLogCleanupThread().processCompletedJobs();
-
- // Logs should be gone after cleanup.
- assertFalse("Userlogs dir " + jobUserLogDir + " is not deleted as expected!!",
- jobUserLogDir.exists());
- }
-
- /**
- * Test job cleanup by doing the following
- * - create files with no write permissions to TT under job-work-dir
- * - create files with no write permissions to TT under task-work-dir
- */
- public void testJobFilesRemoval() throws IOException, InterruptedException {
- if (!canRun()) {
- return;
- }
-
- LOG.info("Running testJobCleanup()");
- // Localize job and localize task.
- TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
- localizedJobConf = rjob.getJobConf();
-
- // Set an inline cleanup queue
- InlineCleanupQueue cleanupQueue = new InlineCleanupQueue();
- tracker.setCleanupThread(cleanupQueue);
-
- // Create a file in job's work-dir with 555
- String jobWorkDir =
- TaskTracker.getJobWorkDir(task.getUser(), task.getJobID().toString());
- Path[] jPaths = tracker.getLocalFiles(localizedJobConf, jobWorkDir);
- assertTrue("No paths found for job", jPaths.length > 0);
- for (Path p : jPaths) {
- if (tracker.getLocalFileSystem().exists(p)) {
- createFileAndSetPermissions(localizedJobConf, p);
- }
- }
-
- // Initialize task dirs
- tip.setJobConf(localizedJobConf);
- tip.localizeTask(task);
-
- // Create a file in task local dir with 555
- // this is simply to test the case where jvm reuse is enabled and some
- // files in task-attempt-local-dir are left behind to be cleaned up when the
- // job finishes.
- String taskLocalDir =
- TaskTracker.getLocalTaskDir(task.getUser(), task.getJobID().toString(),
- task.getTaskID().toString(), false);
- Path[] tPaths = tracker.getLocalFiles(localizedJobConf, taskLocalDir);
- assertTrue("No paths found for task", tPaths.length > 0);
- for (Path p : tPaths) {
- if (tracker.getLocalFileSystem().exists(p)) {
- createFileAndSetPermissions(localizedJobConf, p);
- }
- }
-
- // remove the job work dir
- tracker.removeJobFiles(task.getUser(), task.getJobID());
-
- // check the task-local-dir
- boolean tLocalDirExists = false;
- for (Path p : tPaths) {
- if (tracker.getLocalFileSystem().exists(p)) {
- tLocalDirExists = true;
- }
- }
- assertFalse("Task " + task.getTaskID() + " local dir exists after cleanup",
- tLocalDirExists);
-
- // Verify that the TaskTracker (via the task-controller) cleans up the dirs.
- // check the job-work-dir
- boolean jWorkDirExists = false;
- for (Path p : jPaths) {
- if (tracker.getLocalFileSystem().exists(p)) {
- jWorkDirExists = true;
- }
- }
- assertFalse("Job " + task.getJobID() + " work dir exists after cleanup",
- jWorkDirExists);
- // Test userlogs cleanup.
- verifyUserLogsRemoval();
-
- // Check that the empty $mapred.local.dir/taskTracker/$user dirs are still
- // there.
- for (String localDir : localDirs) {
- Path userDir =
- new Path(localDir, TaskTracker.getUserDir(task.getUser()));
- assertTrue("User directory " + userDir + " is not present!!",
- tracker.getLocalFileSystem().exists(userDir));
- }
- }
-
- /**
- * Tests TaskTracker restart after the localization.
- *
- * This tests the following steps:
- *
- * Localize Job, initialize a task.
- * Then restart the Tracker.
- * Launch a cleanup attempt for the task.
- *
- * @throws IOException
- * @throws InterruptedException
- */
- public void testTrackerRestart() throws IOException, InterruptedException {
- if (!canRun()) {
- return;
- }
-
- // Localize job and localize task.
- TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
- localizedJobConf = rjob.getJobConf();
- initializeTask();
-
- // imitate tracker restart
- startTracker();
-
- // create a task cleanup attempt
- createTask();
- task.setTaskCleanupTask();
- // register task
- tip = tracker.new TaskInProgress(task, trackerFConf);
-
- // localize the job again.
- rjob = tracker.localizeJob(tip);
- localizedJobConf = rjob.getJobConf();
- checkJobLocalization();
-
- // localize task cleanup attempt
- initializeTask();
- checkTaskLocalization();
- }
-
- /**
- * Tests TaskTracker re-init after the localization.
- *
- * This tests the following steps:
- *
- * Localize Job, initialize a task.
- * Then reinit the Tracker.
- * Launch a cleanup attempt for the task.
- *
- * @throws IOException
- * @throws InterruptedException
- */
- public void testTrackerReinit() throws IOException, InterruptedException {
- if (!canRun()) {
- return;
- }
-
- // Localize job and localize task.
- TaskTracker.RunningJob rjob = tracker.localizeJob(tip);
- localizedJobConf = rjob.getJobConf();
- initializeTask();
-
- // imitate tracker reinit
- initializeTracker();
-
- // create a task cleanup attempt
- createTask();
- task.setTaskCleanupTask();
- // register task
- tip = tracker.new TaskInProgress(task, trackerFConf);
-
- // localize the job again.
- rjob = tracker.localizeJob(tip);
- localizedJobConf = rjob.getJobConf();
- checkJobLocalization();
-
- // localize task cleanup attempt
- initializeTask();
- checkTaskLocalization();
- }
-
- /**
- * Localizes a cleanup task and validates permissions.
- *
- * @throws InterruptedException
- * @throws IOException
- */
- public void testCleanupTaskLocalization() throws IOException,
- InterruptedException {
- if (!canRun()) {
- return;
- }
-
- task.setTaskCleanupTask();
- // register task
- tip = tracker.new TaskInProgress(task, trackerFConf);
-
- // localize the job.
- RunningJob rjob = tracker.localizeJob(tip);
- localizedJobConf = rjob.getJobConf();
- checkJobLocalization();
-
- // localize task cleanup attempt
- initializeTask();
- checkTaskLocalization();
-
- }
-}
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java b/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
deleted file mode 100644
index 243a8e4..0000000
--- a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
+++ /dev/null
@@ -1,430 +0,0 @@
-/** Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.security;
-
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.security.NoSuchAlgorithmException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.crypto.KeyGenerator;
-import javax.crypto.spec.SecretKeySpec;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.viewfs.ViewFileSystem;
-import org.apache.hadoop.hdfs.HftpFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.SleepJob;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.ToolRunner;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-public class TestTokenCache {
- private static final int NUM_OF_KEYS = 10;
-
- // my sleep class - adds a check for the tokenCache
- static class MySleepMapper extends SleepJob.SleepMapper {
- /**
- * attempts to access the tokenCache as the client would
- */
- @Override
- public void map(IntWritable key, IntWritable value, Context context)
- throws IOException, InterruptedException {
- // get token storage and a key
- Credentials ts = context.getCredentials();
- byte[] key1 = ts.getSecretKey(new Text("alias1"));
- Collection<Token<? extends TokenIdentifier>> dts = ts.getAllTokens();
- int dts_size = 0;
- if(dts != null)
- dts_size = dts.size();
-
- if(dts_size != 2) { // one job token and one delegation token
- throw new RuntimeException("tokens are not available"); // fail the test
- }
-
- if(key1 == null || ts == null || ts.numberOfSecretKeys() != NUM_OF_KEYS) {
- throw new RuntimeException("secret keys are not available"); // fail the test
- }
- super.map(key, value, context);
- }
- }
-
- class MySleepJob extends SleepJob {
- @Override
- public Job createJob(int numMapper, int numReducer,
- long mapSleepTime, int mapSleepCount,
- long reduceSleepTime, int reduceSleepCount)
- throws IOException {
- Job job = super.createJob(numMapper, numReducer,
- mapSleepTime, mapSleepCount,
- reduceSleepTime, reduceSleepCount);
-
- job.setMapperClass(MySleepMapper.class);
- //Populate tokens here because security is disabled.
- populateTokens(job);
- return job;
- }
-
- private void populateTokens(Job job) {
- // Credentials in the job will not have delegation tokens
- // because security is disabled. Fetch delegation tokens
- // and populate the credential in the job.
- try {
- Credentials ts = job.getCredentials();
- Path p1 = new Path("file1");
- p1 = p1.getFileSystem(job.getConfiguration()).makeQualified(p1);
- Credentials cred = new Credentials();
- TokenCache.obtainTokensForNamenodesInternal(cred, new Path[] { p1 },
- job.getConfiguration());
- for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
- ts.addToken(new Text("Hdfs"), t);
- }
- } catch (IOException e) {
- Assert.fail("Exception " + e);
- }
- }
- }
-
- private static MiniMRCluster mrCluster;
- private static MiniDFSCluster dfsCluster;
- private static final Path TEST_DIR =
- new Path(System.getProperty("test.build.data","/tmp"), "sleepTest");
- private static final Path tokenFileName = new Path(TEST_DIR, "tokenFile.json");
- private static int numSlaves = 1;
- private static JobConf jConf;
- private static ObjectMapper mapper = new ObjectMapper();
- private static Path p1;
- private static Path p2;
-
- @BeforeClass
- public static void setUp() throws Exception {
-
- Configuration conf = new Configuration();
- conf.set("hadoop.security.auth_to_local", "RULE:[2:$1]");
- dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
- jConf = new JobConf(conf);
- mrCluster = new MiniMRCluster(0, 0, numSlaves,
- dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null,
- jConf);
-
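- // Write NUM_OF_KEYS random secret keys to a JSON file and verify they read back.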
- createTokenFileJson();
- verifySecretKeysInJSONFile();
- NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
- FileSystem fs = dfsCluster.getFileSystem();
-
- p1 = new Path("file1");
- p2 = new Path("file2");
-
- p1 = fs.makeQualified(p1);
- }
-
- @AfterClass
- public static void tearDown() throws Exception {
- if(mrCluster != null)
- mrCluster.shutdown();
- mrCluster = null;
- if(dfsCluster != null)
- dfsCluster.shutdown();
- dfsCluster = null;
- }
-
- // create a JSON file and put some keys into it.
- private static void createTokenFileJson() throws IOException {
- Map<String, String> map = new HashMap<String, String>();
-
- try {
- KeyGenerator kg = KeyGenerator.getInstance("HmacSHA1");
- for(int i=0; i<NUM_OF_KEYS; i++) {
- SecretKeySpec key = (SecretKeySpec) kg.generateKey();
- byte [] enc_key = key.getEncoded();
- map.put("alias"+i, new String(Base64.encodeBase64(enc_key)));
-
- }
- } catch (NoSuchAlgorithmException e) {
- throw new IOException(e);
- }
-
- try {
- File p = new File(tokenFileName.getParent().toString());
- p.mkdirs();
- // convert to JSON and save to the file
- mapper.writeValue(new File(tokenFileName.toString()), map);
-
- } catch (Exception e) {
- System.out.println("failed with :" + e.getLocalizedMessage());
- }
- }
-
- @SuppressWarnings("unchecked")
- private static void verifySecretKeysInJSONFile() throws IOException {
- Map<String, String> map;
- map = mapper.readValue(new File(tokenFileName.toString()), Map.class);
- assertEquals("didn't read JSON correctly", map.size(), NUM_OF_KEYS);
- }
-
- /**
- * run a distributed job and verify that TokenCache is available
- * @throws IOException
- */
- @Test
- public void testTokenCache() throws IOException {
-
- System.out.println("running dist job");
-
- // make sure JT starts
- jConf = mrCluster.createJobConf();
-
- // provide namenode addresses for the job to get delegation tokens for
- String nnUri = dfsCluster.getURI(0).toString();
- jConf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
- // job tracker principal id
- jConf.set(JTConfig.JT_USER_NAME, "jt_id/foo@BAR");
-
- // using argument to pass the file name
- String[] args = {
- "-tokenCacheFile", tokenFileName.toString(),
- "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
- };
-
- int res = -1;
- try {
- res = ToolRunner.run(jConf, new MySleepJob(), args);
- } catch (Exception e) {
- System.out.println("Job failed with" + e.getLocalizedMessage());
- e.printStackTrace(System.out);
- fail("Job failed");
- }
- assertEquals("dist job res is not 0", res, 0);
- }
-
- /**
- * run a local job and verify that TokenCache is available
- * @throws NoSuchAlgorithmException
- * @throws IOException
- */
- @Test
- public void testLocalJobTokenCache() throws NoSuchAlgorithmException, IOException {
-
- System.out.println("running local job");
- // this is local job
- String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
- jConf.set("mapreduce.job.credentials.json", tokenFileName.toString());
-
- int res = -1;
- try {
- res = ToolRunner.run(jConf, new MySleepJob(), args);
- } catch (Exception e) {
- System.out.println("Job failed with" + e.getLocalizedMessage());
- e.printStackTrace(System.out);
- fail("local Job failed");
- }
- assertEquals("local job res is not 0", res, 0);
- }
-
- @Test
- public void testGetTokensForNamenodes() throws IOException {
-
- Credentials credentials = new Credentials();
- TokenCache.obtainTokensForNamenodesInternal(credentials, new Path[] { p1,
- p2 }, jConf);
-
- // this token is keyed by hostname:port key.
- String fs_addr =
- SecurityUtil.buildDTServiceName(p1.toUri(), NameNode.DEFAULT_PORT);
- Token<DelegationTokenIdentifier> nnt = TokenCache.getDelegationToken(
- credentials, fs_addr);
- System.out.println("dt for " + p1 + "(" + fs_addr + ")" + " = " + nnt);
- assertNotNull("Token for nn is null", nnt);
-
- // verify the size
- Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
- assertEquals("number of tokens is not 1", 1, tns.size());
-
- boolean found = false;
- for(Token<? extends TokenIdentifier> t: tns) {
- if(t.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND) &&
- t.getService().equals(new Text(fs_addr))) {
- found = true;
- }
- assertTrue("didn't find token for " + p1 ,found);
- }
- }
-
- @Test
- public void testGetTokensForHftpFS() throws IOException, URISyntaxException {
- HftpFileSystem hfs = mock(HftpFileSystem.class);
-
- DelegationTokenSecretManager dtSecretManager =
- NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem());
- String renewer = "renewer";
- jConf.set(JTConfig.JT_USER_NAME,renewer);
- DelegationTokenIdentifier dtId =
- new DelegationTokenIdentifier(new Text("user"), new Text(renewer), null);
- final Token<DelegationTokenIdentifier> t =
- new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
-
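- // Key the token with the same host:port service name that the TokenCache lookup computes for this URI.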
- final URI uri = new URI("hftp://host:2222/file1");
- final String fs_addr =
- SecurityUtil.buildDTServiceName(uri, NameNode.DEFAULT_PORT);
- t.setService(new Text(fs_addr));
-
- //when(hfs.getUri()).thenReturn(uri);
- Mockito.doAnswer(new Answer<URI>(){
- @Override
- public URI answer(InvocationOnMock invocation)
- throws Throwable {
- return uri;
- }}).when(hfs).getUri();
-
- //when(hfs.getDelegationToken()).thenReturn((Token<? extends TokenIdentifier>) t);
- Mockito.doAnswer(new Answer<Token<DelegationTokenIdentifier>>(){
- @Override
- public Token<DelegationTokenIdentifier> answer(InvocationOnMock invocation)
- throws Throwable {
- return t;
- }}).when(hfs).getDelegationToken(renewer);
-
- //when(hfs.getDelegationTokens()).thenReturn((Token<? extends TokenIdentifier>) t);
- Mockito.doAnswer(new Answer<List<Token<DelegationTokenIdentifier>>>(){
- @Override
- public List<Token<DelegationTokenIdentifier>> answer(InvocationOnMock invocation)
- throws Throwable {
- return Collections.singletonList(t);
- }}).when(hfs).getDelegationTokens(renewer);
-
- //when(hfs.getCanonicalServiceName()).thenReturn(fs_addr);
- Mockito.doAnswer(new Answer<String>(){
- @Override
- public String answer(InvocationOnMock invocation)
- throws Throwable {
- return fs_addr;
- }}).when(hfs).getCanonicalServiceName();
-
- Credentials credentials = new Credentials();
- Path p = new Path(uri.toString());
- System.out.println("Path for hftp="+ p + "; fs_addr="+fs_addr + "; rn=" + renewer);
- TokenCache.obtainTokensForNamenodesInternal(hfs, credentials, jConf);
-
- Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
- assertEquals("number of tokens is not 1", 1, tns.size());
-
- boolean found = false;
- for(Token<? extends TokenIdentifier> tt: tns) {
- System.out.println("token="+tt);
- if(tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND) &&
- tt.getService().equals(new Text(fs_addr))) {
- found = true;
- assertEquals("different token", tt, t);
- }
- assertTrue("didn't find token for " + p, found);
- }
- }
-
- /**
- * verify _HOST substitution
- * @throws IOException
- */
- @Test
- public void testGetJTPrincipal() throws IOException {
- String serviceName = "jt/";
- String hostName = "foo";
- String domainName = "@BAR";
- Configuration conf = new Configuration();
- conf.set(JTConfig.JT_IPC_ADDRESS, hostName + ":8888");
- conf.set(JTConfig.JT_USER_NAME, serviceName + SecurityUtil.HOSTNAME_PATTERN
- + domainName);
- assertEquals("Failed to substitute HOSTNAME_PATTERN with hostName",
- serviceName + hostName + domainName, TokenCache.getJTPrincipal(conf));
- }
-
- @Test
- public void testGetTokensForViewFS() throws IOException, URISyntaxException {
- Configuration conf = new Configuration(jConf);
- FileSystem dfs = dfsCluster.getFileSystem();
- String serviceName = dfs.getCanonicalServiceName();
-
- Path p1 = new Path("/mount1");
- Path p2 = new Path("/mount2");
- p1 = dfs.makeQualified(p1);
- p2 = dfs.makeQualified(p2);
-
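- // Both viewfs links point at the same underlying namenode, so only one delegation token is expected.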
- conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
- conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
- Credentials credentials = new Credentials();
- Path lp1 = new Path("viewfs:///dir1");
- Path lp2 = new Path("viewfs:///dir2");
- Path[] paths = new Path[2];
- paths[0] = lp1;
- paths[1] = lp2;
- TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
-
- Collection<Token<? extends TokenIdentifier>> tns =
- credentials.getAllTokens();
- assertEquals("number of tokens is not 1", 1, tns.size());
-
- boolean found = false;
- for (Token<? extends TokenIdentifier> tt : tns) {
- System.out.println("token=" + tt);
- if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
- && tt.getService().equals(new Text(serviceName))) {
- found = true;
- }
- assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
- }
- }
-}
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestLinuxResourceCalculatorPlugin.java b/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestLinuxResourceCalculatorPlugin.java
deleted file mode 100644
index 8f0010c..0000000
--- a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/util/TestLinuxResourceCalculatorPlugin.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.util;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.Random;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.TaskTrackerStatus;
-import org.junit.Test;
-
-/**
- * A JUnit test for {@link LinuxResourceCalculatorPlugin}.
- * Creates fake /proc/ files and verifies the parsing and calculations.
- */
-public class TestLinuxResourceCalculatorPlugin extends TestCase {
- /**
- * LinuxResourceCalculatorPlugin with a fake timer
- */
- static class FakeLinuxResourceCalculatorPlugin extends
- LinuxResourceCalculatorPlugin {
-
- long currentTime = 0;
- public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
- String procfsCpuFile,
- String procfsStatFile,
- long jiffyLengthInMillis) {
- super(procfsMemFile, procfsCpuFile, procfsStatFile, jiffyLengthInMillis);
- }
- @Override
- long getCurrentTime() {
- return currentTime;
- }
- public void advanceTime(long adv) {
- currentTime += adv * jiffyLengthInMillis;
- }
- }
- private static final FakeLinuxResourceCalculatorPlugin plugin;
- private static String TEST_ROOT_DIR = new Path(System.getProperty(
- "test.build.data", "/tmp")).toString().replace(' ', '+');
- private static final String FAKE_MEMFILE;
- private static final String FAKE_CPUFILE;
- private static final String FAKE_STATFILE;
- private static final long FAKE_JIFFY_LENGTH = 10L;
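- // Build randomly named fake /proc file paths under the test root and hand them to the plugin.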
- static {
- int randomNum = (new Random()).nextInt(1000000000);
- FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
- FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
- FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
- plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
- FAKE_STATFILE,
- FAKE_JIFFY_LENGTH);
- }
- static final String MEMINFO_FORMAT =
- "MemTotal: %d kB\n" +
- "MemFree: %d kB\n" +
- "Buffers: 138244 kB\n" +
- "Cached: 947780 kB\n" +
- "SwapCached: 142880 kB\n" +
- "Active: 3229888 kB\n" +
- "Inactive: %d kB\n" +
- "SwapTotal: %d kB\n" +
- "SwapFree: %d kB\n" +
- "Dirty: 122012 kB\n" +
- "Writeback: 0 kB\n" +
- "AnonPages: 2710792 kB\n" +
- "Mapped: 24740 kB\n" +
- "Slab: 132528 kB\n" +
- "SReclaimable: 105096 kB\n" +
- "SUnreclaim: 27432 kB\n" +
- "PageTables: 11448 kB\n" +
- "NFS_Unstable: 0 kB\n" +
- "Bounce: 0 kB\n" +
- "CommitLimit: 4125904 kB\n" +
- "Committed_AS: 4143556 kB\n" +
- "VmallocTotal: 34359738367 kB\n" +
- "VmallocUsed: 1632 kB\n" +
- "VmallocChunk: 34359736375 kB\n" +
- "HugePages_Total: 0\n" +
- "HugePages_Free: 0\n" +
- "HugePages_Rsvd: 0\n" +
- "Hugepagesize: 2048 kB";
-
- static final String CPUINFO_FORMAT =
- "processor : %s\n" +
- "vendor_id : AuthenticAMD\n" +
- "cpu family : 15\n" +
- "model : 33\n" +
- "model name : Dual Core AMD Opteron(tm) Processor 280\n" +
- "stepping : 2\n" +
- "cpu MHz : %f\n" +
- "cache size : 1024 KB\n" +
- "physical id : 0\n" +
- "siblings : 2\n" +
- "core id : 0\n" +
- "cpu cores : 2\n" +
- "fpu : yes\n" +
- "fpu_exception : yes\n" +
- "cpuid level : 1\n" +
- "wp : yes\n" +
- "flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov " +
- "pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt lm " +
- "3dnowext 3dnow pni lahf_lm cmp_legacy\n" +
- "bogomips : 4792.41\n" +
- "TLB size : 1024 4K pages\n" +
- "clflush size : 64\n" +
- "cache_alignment : 64\n" +
- "address sizes : 40 bits physical, 48 bits virtual\n" +
- "power management: ts fid vid ttp";
-
- static final String STAT_FILE_FORMAT =
- "cpu %d %d %d 1646495089 831319 48713 164346 0\n" +
- "cpu0 15096055 30805 3823005 411456015 206027 13 14269 0\n" +
- "cpu1 14760561 89890 6432036 408707910 456857 48074 130857 0\n" +
- "cpu2 12761169 20842 3758639 413976772 98028 411 10288 0\n" +
- "cpu3 12355207 47322 5789691 412354390 70406 213 8931 0\n" +
- "intr 114648668 20010764 2 0 945665 2 0 0 0 0 0 0 0 4 0 0 0 0 0 0\n" +
- "ctxt 242017731764\n" +
- "btime 1257808753\n" +
- "processes 26414943\n" +
- "procs_running 1\n" +
- "procs_blocked 0\n";
-
- /**
- * Test parsing /proc/stat and /proc/cpuinfo
- * @throws IOException
- */
- @Test
- public void testParsingProcStatAndCpuFile() throws IOException {
- // Write fake /proc/cpuinfo file.
- long numProcessors = 8;
- long cpuFrequencyKHz = 2392781;
- String fileContent = "";
- for (int i = 0; i < numProcessors; i++) {
- fileContent += String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D) +
- "\n";
- }
- File tempFile = new File(FAKE_CPUFILE);
- tempFile.deleteOnExit();
- FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
- fWriter.write(fileContent);
- fWriter.close();
- assertEquals(plugin.getNumProcessors(), numProcessors);
- assertEquals(plugin.getCpuFrequency(), cpuFrequencyKHz);
-
- // Write fake /proc/stat file.
- long uTime = 54972994;
- long nTime = 188860;
- long sTime = 19803373;
- tempFile = new File(FAKE_STATFILE);
- tempFile.deleteOnExit();
- updateStatFile(uTime, nTime, sTime);
- assertEquals(plugin.getCumulativeCpuTime(),
- FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
- assertEquals(plugin.getCpuUsage(), (float)(TaskTrackerStatus.UNAVAILABLE));
-
- // Advance the time and sample again to test the CPU usage calculation
- uTime += 100L;
- plugin.advanceTime(200L);
- updateStatFile(uTime, nTime, sTime);
- assertEquals(plugin.getCumulativeCpuTime(),
- FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
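- // 100 CPU jiffies over 200 elapsed jiffies across 8 processors: 100 / (200 * 8) = 6.25%.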
- assertEquals(plugin.getCpuUsage(), 6.25F);
-
- // Advance the time and sample again. This time, we call getCpuUsage() only.
- uTime += 600L;
- plugin.advanceTime(300L);
- updateStatFile(uTime, nTime, sTime);
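- // 600 CPU jiffies over 300 elapsed jiffies across 8 processors: 600 / (300 * 8) = 25%.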
- assertEquals(plugin.getCpuUsage(), 25F);
-
- // Advance very short period of time (one jiffy length).
- // In this case, CPU usage should not be updated.
- uTime += 1L;
- plugin.advanceTime(1L);
- updateStatFile(uTime, nTime, sTime);
- assertEquals(plugin.getCumulativeCpuTime(),
- FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
- assertEquals(plugin.getCpuUsage(), 25F); // CPU usage is not updated.
- }
-
- /**
- * Write information to fake /proc/stat file
- */
- private void updateStatFile(long uTime, long nTime, long sTime)
- throws IOException {
- FileWriter fWriter = new FileWriter(FAKE_STATFILE);
- fWriter.write(String.format(STAT_FILE_FORMAT, uTime, nTime, sTime));
- fWriter.close();
- }
-
- /**
- * Test parsing /proc/meminfo
- * @throws IOException
- */
- @Test
- public void testParsingProcMemFile() throws IOException {
- long memTotal = 4058864L;
- long memFree = 99632L;
- long inactive = 567732L;
- long swapTotal = 2096472L;
- long swapFree = 1818480L;
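- // Values above are in kB, mirroring /proc/meminfo; the plugin reports bytes, hence the 1024L factor.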
- File tempFile = new File(FAKE_MEMFILE);
- tempFile.deleteOnExit();
- FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
- fWriter.write(String.format(MEMINFO_FORMAT,
- memTotal, memFree, inactive, swapTotal, swapFree));
-
- fWriter.close();
- assertEquals(plugin.getAvailablePhysicalMemorySize(),
- 1024L * (memFree + inactive));
- assertEquals(plugin.getAvailableVirtualMemorySize(),
- 1024L * (memFree + inactive + swapFree));
- assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
- assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
- }
-}